Compare commits

..

3 Commits

Author SHA1 Message Date
you 5c2c71c6ee fix(#791): update memory-accounting to reflect eliminated spTxIndex
Remove perSubpathEntryBytes constant and all O(path²) subpath cost
calculations from estimateStoreTxBytes and estimateStoreTxBytesTypical.
Update comments to note that spTxIndex was eliminated in #791 and
singleton subpath entries are dropped from spIndex.
2026-04-19 00:08:28 +00:00
you 8d89f7d3e9 fix(#791): drop singleton subpath entries to save ~150MB at scale
After the initial subpath index build, iterate and delete all entries
where count==1. At scale, 60-70% of subpath keys are singletons that
appear only once — they add no analytical value and consume significant
memory (each entry is a string key + map bucket overhead).

Newly-seen singletons during incremental ingest will remain in the
index until the next restart. This is acceptable since singleton
subpaths are noise for ranking/display purposes.

Add a startup log line showing how many entries were kept vs dropped.
2026-04-19 00:03:37 +00:00
you 7abd05ff7f fix(#791): eliminate spTxIndex to save ~280MB at 3.4M subpaths
Remove the spTxIndex map (map[string][]*StoreTx) that stored per-subpath
transmission pointer slices. This was the largest single heap consumer
during startup on databases with many unique subpaths.

Replace the only read site (GetSubpathDetail) with an O(packets) scan
that matches subpaths on the fly. This is acceptable because the
endpoint is only called on drill-down, not listing.

Remove all write sites (addTxToSubpathIndexFull, removeTxFromSubpathIndexFull)
and collapse them back into the simpler addTxToSubpathIndex/removeTxFromSubpathIndex
functions that only maintain counts.
2026-04-19 00:03:13 +00:00
68 changed files with 1292 additions and 8313 deletions
+1 -1
View File
@@ -1 +1 @@
{"schemaVersion":1,"label":"e2e tests","message":"82 passed","color":"brightgreen"}
{"schemaVersion":1,"label":"e2e tests","message":"45 passed","color":"brightgreen"}
+1 -1
View File
@@ -1 +1 @@
{"schemaVersion":1,"label":"frontend coverage","message":"37.26%","color":"red"}
{"schemaVersion":1,"label":"frontend coverage","message":"39.68%","color":"red"}
+21 -13
View File
@@ -135,7 +135,7 @@ jobs:
e2e-test:
name: "🎭 Playwright E2E Tests"
needs: [go-test]
runs-on: ubuntu-latest
runs-on: [self-hosted, Linux]
defaults:
run:
shell: bash
@@ -145,6 +145,13 @@ jobs:
with:
fetch-depth: 0
- name: Free disk space
run: |
# Prune old runner diagnostic logs (can accumulate 50MB+)
find ~/actions-runner/_diag/ -name '*.log' -mtime +3 -delete 2>/dev/null || true
# Show available disk space
df -h / | tail -1
- name: Set up Node.js 22
uses: actions/setup-node@v5
with:
@@ -245,11 +252,17 @@ jobs:
build-and-publish:
name: "🏗️ Build & Publish Docker Image"
needs: [e2e-test]
runs-on: ubuntu-latest
runs-on: [self-hosted, meshcore-runner-2]
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Free disk space
run: |
docker system prune -af 2>/dev/null || true
docker builder prune -af 2>/dev/null || true
df -h /
- name: Compute build metadata
id: meta
run: |
@@ -277,10 +290,6 @@ jobs:
if: github.event_name == 'push'
uses: docker/setup-buildx-action@v3
- name: Set up QEMU (arm64 runtime stage)
if: github.event_name == 'push'
uses: docker/setup-qemu-action@v3
- name: Log in to GHCR
if: github.event_name == 'push'
uses: docker/login-action@v3
@@ -308,7 +317,7 @@ jobs:
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
tags: ${{ steps.docker-meta.outputs.tags }}
labels: ${{ steps.docker-meta.outputs.labels }}
build-args: |
@@ -359,7 +368,7 @@ jobs:
# ───────────────────────────────────────────────────────────────
deploy:
name: "🚀 Deploy Staging"
if: false # disabled: staging VM offline, manual deploy required
if: github.event_name == 'push'
needs: [build-and-publish]
runs-on: [self-hosted, meshcore-runner-2]
steps:
@@ -423,11 +432,10 @@ jobs:
- name: Smoke test staging API
run: |
PORT="${STAGING_GO_HTTP_PORT:-80}"
if curl -sf "http://localhost:${PORT}/api/stats" | grep -q engine; then
if curl -sf http://localhost:82/api/stats | grep -q engine; then
echo "Staging verified — engine field present ✅"
else
echo "Staging /api/stats did not return engine field (port ${PORT})"
echo "Staging /api/stats did not return engine field"
exit 1
fi
@@ -448,8 +456,8 @@ jobs:
publish:
name: "📝 Publish Badges & Summary"
if: github.event_name == 'push'
needs: [build-and-publish]
runs-on: ubuntu-latest
needs: [deploy]
runs-on: [self-hosted, Linux]
steps:
- name: Checkout code
uses: actions/checkout@v5
+7 -15
View File
@@ -1,35 +1,28 @@
# Build stage always runs natively on the builder's arch ($BUILDPLATFORM)
# and cross-compiles to $TARGETOS/$TARGETARCH via Go toolchain. No QEMU.
FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS builder
FROM golang:1.22-alpine AS builder
RUN apk add --no-cache build-base
ARG APP_VERSION=unknown
ARG GIT_COMMIT=unknown
ARG BUILD_TIME=unknown
# Provided by buildx for multi-arch builds
ARG TARGETOS
ARG TARGETARCH
# Build server (pure-Go sqlite — no CGO needed, cross-compiles cleanly)
# Build server
WORKDIR /build/server
COPY cmd/server/go.mod cmd/server/go.sum ./
COPY internal/geofilter/ ../../internal/geofilter/
COPY internal/sigvalidate/ ../../internal/sigvalidate/
COPY internal/packetpath/ ../../internal/packetpath/
RUN go mod download
COPY cmd/server/ ./
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
go build -ldflags "-X main.Version=${APP_VERSION} -X main.Commit=${GIT_COMMIT} -X main.BuildTime=${BUILD_TIME}" -o /corescope-server .
RUN go build -ldflags "-X main.Version=${APP_VERSION} -X main.Commit=${GIT_COMMIT} -X main.BuildTime=${BUILD_TIME}" -o /corescope-server .
# Build ingestor
WORKDIR /build/ingestor
COPY cmd/ingestor/go.mod cmd/ingestor/go.sum ./
COPY internal/geofilter/ ../../internal/geofilter/
COPY internal/sigvalidate/ ../../internal/sigvalidate/
COPY internal/packetpath/ ../../internal/packetpath/
RUN go mod download
COPY cmd/ingestor/ ./
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
go build -o /corescope-ingestor .
RUN go build -o /corescope-ingestor .
# Build decrypt CLI
WORKDIR /build/decrypt
@@ -37,8 +30,7 @@ COPY cmd/decrypt/go.mod cmd/decrypt/go.sum ./
COPY internal/channel/ ../../internal/channel/
RUN go mod download
COPY cmd/decrypt/ ./
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
go build -ldflags="-s -w" -o /corescope-decrypt .
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o /corescope-decrypt .
# Runtime image
FROM alpine:3.20
+6 -32
View File
@@ -11,7 +11,6 @@ import (
"sync/atomic"
"time"
"github.com/meshcore-analyzer/packetpath"
_ "modernc.org/sqlite"
)
@@ -190,7 +189,7 @@ func applySchema(db *sql.DB) error {
db.Exec(`DROP VIEW IF EXISTS packets_v`)
_, vErr := db.Exec(`
CREATE VIEW packets_v AS
SELECT o.id, COALESCE(o.raw_hex, t.raw_hex) AS raw_hex,
SELECT o.id, t.raw_hex,
datetime(o.timestamp, 'unixepoch') AS timestamp,
obs.id AS observer_id, obs.name AS observer_name,
o.direction, o.snr, o.rssi, o.score, t.hash, t.route_type,
@@ -409,15 +408,6 @@ func applySchema(db *sql.DB) error {
log.Println("[migration] dropped_packets table created")
}
// Migration: add raw_hex column to observations (#881)
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observations_raw_hex_v1'")
if row.Scan(&migDone) != nil {
log.Println("[migration] Adding raw_hex column to observations...")
db.Exec(`ALTER TABLE observations ADD COLUMN raw_hex TEXT`)
db.Exec(`INSERT INTO _migrations (name) VALUES ('observations_raw_hex_v1')`)
log.Println("[migration] observations.raw_hex column added")
}
return nil
}
@@ -443,13 +433,8 @@ func (s *Store) prepareStatements() error {
}
s.stmtInsertObservation, err = s.db.Prepare(`
INSERT INTO observations (transmission_id, observer_idx, direction, snr, rssi, score, path_json, timestamp, raw_hex)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(transmission_id, observer_idx, COALESCE(path_json, '')) DO UPDATE SET
snr = COALESCE(excluded.snr, snr),
rssi = COALESCE(excluded.rssi, rssi),
score = COALESCE(excluded.score, score),
raw_hex = COALESCE(excluded.raw_hex, raw_hex)
INSERT OR IGNORE INTO observations (transmission_id, observer_idx, direction, snr, rssi, score, path_json, timestamp)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
`)
if err != nil {
return err
@@ -595,7 +580,7 @@ func (s *Store) InsertTransmission(data *PacketData) (bool, error) {
_, err = s.stmtInsertObservation.Exec(
txID, observerIdx, data.Direction,
data.SNR, data.RSSI, data.Score,
data.PathJSON, epochTs, nilIfEmpty(data.RawHex),
data.PathJSON, epochTs,
)
if err != nil {
s.Stats.WriteErrors.Add(1)
@@ -942,22 +927,11 @@ type MQTTPacketMessage struct {
}
// BuildPacketData constructs a PacketData from a decoded packet and MQTT message.
// path_json is derived directly from raw_hex header bytes (not decoded.Path.Hops)
// to guarantee the stored path always matches the raw bytes. This matters for
// TRACE packets where decoded.Path.Hops is overwritten with payload hops (#886).
func BuildPacketData(msg *MQTTPacketMessage, decoded *DecodedPacket, observerID, region string) *PacketData {
now := time.Now().UTC().Format(time.RFC3339)
pathJSON := "[]"
// For TRACE packets, path_json must be the payload-decoded route hops
// (decoded.Path.Hops), NOT the raw_hex header bytes which are SNR values.
// For all other packet types, derive path from raw_hex (#886).
if !packetpath.PathBytesAreHops(byte(decoded.Header.PayloadType)) {
if len(decoded.Path.Hops) > 0 {
b, _ := json.Marshal(decoded.Path.Hops)
pathJSON = string(b)
}
} else if hops, err := packetpath.DecodePathFromRawHex(msg.Raw); err == nil && len(hops) > 0 {
b, _ := json.Marshal(hops)
if len(decoded.Path.Hops) > 0 {
b, _ := json.Marshal(decoded.Path.Hops)
pathJSON = string(b)
}
-241
View File
@@ -2,7 +2,6 @@ package main
import (
"database/sql"
"encoding/json"
"fmt"
"os"
"path/filepath"
@@ -11,8 +10,6 @@ import (
"sync/atomic"
"testing"
"time"
"github.com/meshcore-analyzer/packetpath"
)
func tempDBPath(t *testing.T) string {
@@ -1885,241 +1882,3 @@ func TestExtractObserverMetaNewFields(t *testing.T) {
t.Errorf("RecvErrors = %v, want 3", meta.RecvErrors)
}
}
// TestInsertObservationSNRFillIn verifies that when the same observation is
// received twice — first without SNR, then with SNR — the SNR is filled in
// rather than silently discarded. The unique dedup index is
// (transmission_id, observer_idx, COALESCE(path_json, '')); observer_idx must
// be non-NULL for the conflict to fire (SQLite treats NULL != NULL).
func TestInsertObservationSNRFillIn(t *testing.T) {
s, err := OpenStore(tempDBPath(t))
if err != nil {
t.Fatal(err)
}
defer s.Close()
// Register the observer so observer_idx is non-NULL (required for dedup).
if err := s.UpsertObserver("pymc-obs1", "PyMC Observer", "SJC", nil); err != nil {
t.Fatal(err)
}
// First arrival: same observer, no SNR/RSSI (e.g. broker replay without RF fields).
data1 := &PacketData{
RawHex: "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976",
Timestamp: "2026-04-20T00:00:00Z",
Hash: "snrfillin0001hash",
RouteType: 1,
ObserverID: "pymc-obs1",
SNR: nil,
RSSI: nil,
}
if _, err := s.InsertTransmission(data1); err != nil {
t.Fatal(err)
}
var snr1, rssi1 *float64
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr1, &rssi1)
if snr1 != nil || rssi1 != nil {
t.Fatalf("precondition: first insert should have nil SNR/RSSI, got snr=%v rssi=%v", snr1, rssi1)
}
// Second arrival: same packet, same observer, now WITH SNR/RSSI.
snr := 10.5
rssi := -88.0
data2 := &PacketData{
RawHex: data1.RawHex,
Timestamp: data1.Timestamp,
Hash: data1.Hash,
RouteType: data1.RouteType,
ObserverID: "pymc-obs1",
SNR: &snr,
RSSI: &rssi,
}
if _, err := s.InsertTransmission(data2); err != nil {
t.Fatal(err)
}
var snr2, rssi2 *float64
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr2, &rssi2)
if snr2 == nil || *snr2 != snr {
t.Errorf("SNR not filled in by second arrival: got %v, want %v", snr2, snr)
}
if rssi2 == nil || *rssi2 != rssi {
t.Errorf("RSSI not filled in by second arrival: got %v, want %v", rssi2, rssi)
}
// Third arrival: same packet again, SNR absent — must NOT overwrite existing SNR.
data3 := &PacketData{
RawHex: data1.RawHex,
Timestamp: data1.Timestamp,
Hash: data1.Hash,
RouteType: data1.RouteType,
ObserverID: "pymc-obs1",
SNR: nil,
RSSI: nil,
}
if _, err := s.InsertTransmission(data3); err != nil {
t.Fatal(err)
}
var snr3, rssi3 *float64
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr3, &rssi3)
if snr3 == nil || *snr3 != snr {
t.Errorf("SNR overwritten by null arrival: got %v, want %v", snr3, snr)
}
if rssi3 == nil || *rssi3 != rssi {
t.Errorf("RSSI overwritten by null arrival: got %v, want %v", rssi3, rssi)
}
}
// TestPerObservationRawHex verifies that two MQTT packets for the same hash
// from different observers store distinct raw_hex per observation (#881).
func TestPerObservationRawHex(t *testing.T) {
store, err := OpenStore(tempDBPath(t))
if err != nil {
t.Fatal(err)
}
defer store.Close()
// Register two observers
store.UpsertObserver("obs-A", "Observer A", "", nil)
store.UpsertObserver("obs-B", "Observer B", "", nil)
hash := "abc123def456"
rawA := "c0ffee01"
rawB := "c0ffee0201aa"
dir := "RX"
// First observation from observer A
pdA := &PacketData{
RawHex: rawA,
Hash: hash,
Timestamp: "2026-04-21T10:00:00Z",
ObserverID: "obs-A",
Direction: &dir,
PathJSON: "[]",
}
isNew, err := store.InsertTransmission(pdA)
if err != nil {
t.Fatalf("insert A: %v", err)
}
if !isNew {
t.Fatal("expected new transmission")
}
// Second observation from observer B (same hash, different raw bytes)
pdB := &PacketData{
RawHex: rawB,
Hash: hash,
Timestamp: "2026-04-21T10:00:01Z",
ObserverID: "obs-B",
Direction: &dir,
PathJSON: `["aabb"]`,
}
isNew2, err := store.InsertTransmission(pdB)
if err != nil {
t.Fatalf("insert B: %v", err)
}
if isNew2 {
t.Fatal("expected duplicate transmission")
}
// Query observations and verify per-observation raw_hex
rows, err := store.db.Query(`
SELECT o.raw_hex, obs.id
FROM observations o
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
ORDER BY o.id ASC
`)
if err != nil {
t.Fatalf("query: %v", err)
}
defer rows.Close()
type obsResult struct {
rawHex string
observerID string
}
var results []obsResult
for rows.Next() {
var rh, oid sql.NullString
if err := rows.Scan(&rh, &oid); err != nil {
t.Fatal(err)
}
results = append(results, obsResult{
rawHex: rh.String,
observerID: oid.String,
})
}
if len(results) != 2 {
t.Fatalf("expected 2 observations, got %d", len(results))
}
if results[0].rawHex != rawA {
t.Errorf("obs A raw_hex: got %q, want %q", results[0].rawHex, rawA)
}
if results[1].rawHex != rawB {
t.Errorf("obs B raw_hex: got %q, want %q", results[1].rawHex, rawB)
}
if results[0].rawHex == results[1].rawHex {
t.Error("both observations have same raw_hex — should differ")
}
}
// TestBuildPacketData_TraceUsesPayloadHops verifies that TRACE packets use
// payload-decoded route hops in path_json (NOT the raw_hex header SNR bytes).
// Issue #886 / #887.
func TestBuildPacketData_TraceUsesPayloadHops(t *testing.T) {
// TRACE packet: header path has SNR bytes [30,2D,0D,23], but decoded.Path.Hops
// is overwritten to payload hops [67,33,D6,33,67].
rawHex := "2604302D0D2359FEE7B100000000006733D63367"
decoded, err := DecodePacket(rawHex, nil, false)
if err != nil {
t.Fatal(err)
}
// decoded.Path.Hops should be the TRACE-replaced hops (payload hops)
if len(decoded.Path.Hops) != 5 {
t.Fatalf("expected 5 decoded hops, got %d", len(decoded.Path.Hops))
}
msg := &MQTTPacketMessage{Raw: rawHex}
pd := BuildPacketData(msg, decoded, "test-obs", "TST")
// For TRACE: path_json MUST be the payload-decoded route hops, NOT the SNR bytes
expectedPathJSON := `["67","33","D6","33","67"]`
if pd.PathJSON != expectedPathJSON {
t.Errorf("path_json = %s, want %s (TRACE must use payload hops)", pd.PathJSON, expectedPathJSON)
}
// Verify that DecodePathFromRawHex returns the SNR bytes (header path) which differ
headerHops, herr := packetpath.DecodePathFromRawHex(rawHex)
if herr != nil {
t.Fatal(herr)
}
headerJSON, _ := json.Marshal(headerHops)
if string(headerJSON) == expectedPathJSON {
t.Error("header path (SNR) should differ from payload hops for TRACE")
}
}
// TestBuildPacketData_NonTracePathJSON verifies non-TRACE packets also derive path from raw_hex.
func TestBuildPacketData_NonTracePathJSON(t *testing.T) {
// A simple ADVERT packet (payload type 0) with 2 hops, hash_size 1
// Header 0x09 = FLOOD(1), ADVERT(2), version 0
// Path byte 0x02 = hash_size 1, hash_count 2
// Path bytes: AA BB
rawHex := "0902AABB" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
decoded, err := DecodePacket(rawHex, nil, false)
if err != nil {
t.Fatal(err)
}
msg := &MQTTPacketMessage{Raw: rawHex}
pd := BuildPacketData(msg, decoded, "obs1", "TST")
expectedPathJSON := `["AA","BB"]`
if pd.PathJSON != expectedPathJSON {
t.Errorf("path_json = %s, want %s", pd.PathJSON, expectedPathJSON)
}
}
+1 -3
View File
@@ -12,7 +12,6 @@ import (
"strings"
"unicode/utf8"
"github.com/meshcore-analyzer/packetpath"
"github.com/meshcore-analyzer/sigvalidate"
)
@@ -193,9 +192,8 @@ func decodePath(pathByte byte, buf []byte, offset int) (Path, int) {
}, totalBytes
}
// isTransportRoute delegates to packetpath.IsTransportRoute.
func isTransportRoute(routeType int) bool {
return packetpath.IsTransportRoute(routeType)
return routeType == RouteTransportFlood || routeType == RouteTransportDirect
}
func decodeEncryptedPayload(typeName string, buf []byte) Payload {
-104
View File
@@ -11,7 +11,6 @@ import (
"strings"
"testing"
"github.com/meshcore-analyzer/packetpath"
"github.com/meshcore-analyzer/sigvalidate"
)
@@ -1823,106 +1822,3 @@ func TestDecodeAdvertWithSignatureValidation(t *testing.T) {
t.Error("SignatureValid should be nil when validation disabled")
}
}
// === Tests for DecodePathFromRawHex (issue #886) ===
func TestDecodePathFromRawHex_HashSize1(t *testing.T) {
// Header byte 0x26 = route_type DIRECT, payload TRACE
// Path byte 0x04 = hash_size 1 (bits 7-6 = 00 → 0+1=1), hash_count 4
// Path bytes: 30 2D 0D 23
raw := "2604302D0D2359FEE7B100000000006733D63367"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
expected := []string{"30", "2D", "0D", "23"}
if len(hops) != len(expected) {
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
}
for i, h := range hops {
if h != expected[i] {
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
}
}
}
func TestDecodePathFromRawHex_HashSize2(t *testing.T) {
// Path byte 0x42 = hash_size 2 (bits 7-6 = 01 → 1+1=2), hash_count 2
// Header 0x09 = FLOOD route (rt=1), payload ADVERT (pt=2)
// Path bytes: AABB CCDD (4 bytes = 2 hops * 2 bytes)
raw := "0942AABBCCDD" + "00000000000000"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
expected := []string{"AABB", "CCDD"}
if len(hops) != len(expected) {
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
}
for i, h := range hops {
if h != expected[i] {
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
}
}
}
func TestDecodePathFromRawHex_HashSize3(t *testing.T) {
// Path byte 0x81 = hash_size 3 (bits 7-6 = 10 → 2+1=3), hash_count 1
// Header 0x09 = FLOOD route (rt=1), payload ADVERT
raw := "0981AABBCC" + "0000000000"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
if len(hops) != 1 || hops[0] != "AABBCC" {
t.Fatalf("got %v, want [AABBCC]", hops)
}
}
func TestDecodePathFromRawHex_HashSize4(t *testing.T) {
// Path byte 0xC1 = hash_size 4 (bits 7-6 = 11 → 3+1=4), hash_count 1
// Header 0x09 = FLOOD route (rt=1)
raw := "09C1AABBCCDD" + "0000000000"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
if len(hops) != 1 || hops[0] != "AABBCCDD" {
t.Fatalf("got %v, want [AABBCCDD]", hops)
}
}
func TestDecodePathFromRawHex_DirectZeroHops(t *testing.T) {
// Path byte 0x00 = hash_size 1, hash_count 0
// Header 0x0A = DIRECT route (rt=2), payload ADVERT
raw := "0A00" + "0000000000"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
if len(hops) != 0 {
t.Fatalf("got %d hops, want 0", len(hops))
}
}
func TestDecodePathFromRawHex_Transport(t *testing.T) {
// Route type 3 = TRANSPORT_DIRECT → 4 transport code bytes before path byte
// Header 0x27 = route_type 3, payload TRACE
// Transport codes: 1122 3344
// Path byte 0x02 = hash_size 1, hash_count 2
// Path bytes: AA BB
raw := "2711223344" + "02AABB" + "0000000000"
hops, err := packetpath.DecodePathFromRawHex(raw)
if err != nil {
t.Fatal(err)
}
expected := []string{"AA", "BB"}
if len(hops) != len(expected) {
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
}
for i, h := range hops {
if h != expected[i] {
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
}
}
}
-4
View File
@@ -13,10 +13,6 @@ replace github.com/meshcore-analyzer/geofilter => ../../internal/geofilter
replace github.com/meshcore-analyzer/sigvalidate => ../../internal/sigvalidate
require github.com/meshcore-analyzer/packetpath v0.0.0
replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect
+15 -18
View File
@@ -207,6 +207,21 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
topic := m.Topic()
parts := strings.Split(topic, "/")
// IATA filter
if len(source.IATAFilter) > 0 && len(parts) > 1 {
region := parts[1]
matched := false
for _, f := range source.IATAFilter {
if f == region {
matched = true
break
}
}
if !matched {
return
}
}
var msg map[string]interface{}
if err := json.Unmarshal(m.Payload(), &msg); err != nil {
return
@@ -218,9 +233,6 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
}
// Status topic: meshcore/<region>/<observer_id>/status
// IATA filter does NOT apply here — observer metadata (noise_floor, battery, etc.)
// is region-independent and should be accepted from all observers regardless of
// which IATA regions are configured for packet ingestion.
if len(parts) >= 4 && parts[3] == "status" {
observerID := parts[2]
name, _ := msg["origin"].(string)
@@ -249,21 +261,6 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
return
}
// IATA filter applies to packet messages only — not status messages above.
if len(source.IATAFilter) > 0 && len(parts) > 1 {
region := parts[1]
matched := false
for _, f := range source.IATAFilter {
if f == region {
matched = true
break
}
}
if !matched {
return
}
}
// Format 1: Raw packet (meshcoretomqtt / Cisien format)
rawHex, _ := msg["raw"].(string)
if rawHex != "" {
-41
View File
@@ -739,44 +739,3 @@ func TestToFloat64WithUnits(t *testing.T) {
}
}
}
// TestIATAFilterDoesNotDropStatusMessages verifies that status messages from
// out-of-region observers are still processed (noise_floor, battery, etc.)
// even when an IATA filter is configured for packet data.
func TestIATAFilterDoesNotDropStatusMessages(t *testing.T) {
store := newTestStore(t)
source := MQTTSource{Name: "test", IATAFilter: []string{"SJC"}}
// BFL observer sends a status message with noise_floor — outside the IATA filter.
msg := &mockMessage{
topic: "meshcore/BFL/bfl-obs1/status",
payload: []byte(`{"origin":"BFLObserver","stats":{"noise_floor":-105.0}}`),
}
handleMessage(store, "test", source, msg, nil, &Config{})
var name string
var noiseFloor *float64
err := store.db.QueryRow("SELECT name, noise_floor FROM observers WHERE id = 'bfl-obs1'").Scan(&name, &noiseFloor)
if err != nil {
t.Fatalf("observer not found after status from out-of-region observer: %v", err)
}
if name != "BFLObserver" {
t.Errorf("name=%q, want BFLObserver", name)
}
if noiseFloor == nil || *noiseFloor != -105.0 {
t.Errorf("noise_floor=%v, want -105.0 — status message was dropped by IATA filter when it should not be", noiseFloor)
}
// Verify that a packet from BFL is still filtered.
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
pktMsg := &mockMessage{
topic: "meshcore/BFL/bfl-obs1/packets",
payload: []byte(`{"raw":"` + rawHex + `"}`),
}
handleMessage(store, "test", source, pktMsg, nil, &Config{})
var count int
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
if count != 0 {
t.Error("packet from out-of-region BFL should still be filtered by IATA")
}
}
+2 -2
View File
@@ -229,7 +229,7 @@ func createTestDBAt(tb testing.TB, dbPath string, numTx int) {
id INTEGER PRIMARY KEY,
transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
direction TEXT, snr REAL, rssi REAL, score INTEGER,
path_json TEXT, timestamp TEXT, raw_hex TEXT
path_json TEXT, timestamp TEXT
)`)
execOrFail(`CREATE TABLE IF NOT EXISTS observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
execOrFail(`CREATE TABLE IF NOT EXISTS nodes (
@@ -280,7 +280,7 @@ func createTestDBWithObs(tb testing.TB, dbPath string, numTx int) {
)`)
execOrFail(`CREATE TABLE IF NOT EXISTS observations (
id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT
direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT
)`)
execOrFail(`CREATE TABLE IF NOT EXISTS observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
execOrFail(`CREATE TABLE IF NOT EXISTS nodes (
-57
View File
@@ -1,57 +0,0 @@
package main
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
)
// TestPacketsChannelFilter verifies /api/packets?channel=... actually filters
// (regression test for #812).
func TestPacketsChannelFilter(t *testing.T) {
_, router := setupTestServer(t)
get := func(url string) map[string]interface{} {
req := httptest.NewRequest("GET", url, nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusOK {
t.Fatalf("GET %s: expected 200, got %d", url, w.Code)
}
var body map[string]interface{}
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
t.Fatalf("decode %s: %v", url, err)
}
return body
}
all := get("/api/packets?limit=50")
allTotal := int(all["total"].(float64))
if allTotal < 2 {
t.Fatalf("expected baseline >= 2 packets, got %d", allTotal)
}
test := get("/api/packets?limit=50&channel=%23test")
testTotal := int(test["total"].(float64))
if testTotal == 0 {
t.Fatalf("channel=#test: expected >= 1 match, got 0 (filter ignored?)")
}
if testTotal >= allTotal {
t.Fatalf("channel=#test: expected fewer packets than baseline (%d), got %d", allTotal, testTotal)
}
// Every returned packet must be a CHAN/GRP_TXT (payload_type=5) on #test.
pkts, _ := test["packets"].([]interface{})
for _, p := range pkts {
m := p.(map[string]interface{})
if pt, _ := m["payload_type"].(float64); int(pt) != 5 {
t.Errorf("channel=#test: returned non-GRP_TXT packet (payload_type=%v)", m["payload_type"])
}
}
none := get("/api/packets?limit=50&channel=nonexistentchannel")
if int(none["total"].(float64)) != 0 {
t.Fatalf("channel=nonexistentchannel: expected total=0, got %v", none["total"])
}
}
+33 -201
View File
@@ -16,8 +16,7 @@ const (
SkewWarning SkewSeverity = "warning" // 5 min – 1 hour
SkewCritical SkewSeverity = "critical" // 1 hour – 30 days
SkewAbsurd SkewSeverity = "absurd" // > 30 days
SkewNoClock SkewSeverity = "no_clock" // > 365 days — uninitialized RTC
SkewBimodalClock SkewSeverity = "bimodal_clock" // mixed good+bad recent samples (flaky RTC)
SkewNoClock SkewSeverity = "no_clock" // > 365 days — uninitialized RTC
)
// Default thresholds in seconds.
@@ -34,38 +33,6 @@ const (
// maxReasonableDriftPerDay caps drift display. Physically impossible
// drift rates (> 1 day/day) indicate insufficient or outlier samples.
maxReasonableDriftPerDay = 86400.0
// recentSkewWindowCount is the number of most-recent advert samples
// used to derive the "current" skew for severity classification (see
// issue #789). The all-time median is poisoned by historical bad
// samples (e.g. a node that was off and then GPS-corrected); severity
// must reflect current health, not lifetime statistics.
recentSkewWindowCount = 5
// recentSkewWindowSec bounds the recent-window in time as well: only
// samples from the last N seconds count as "recent" for severity.
// The effective window is min(recentSkewWindowCount, samples in 1h).
recentSkewWindowSec = 3600
// bimodalSkewThresholdSec is the absolute skew threshold (1 hour)
// above which a sample is considered "bad" — likely firmware emitting
// a nonsense timestamp from an uninitialized RTC, not real drift.
// Chosen to match the warning/critical severity boundary: real clock
// drift rarely exceeds 1 hour, while epoch-0 RTCs produce ~1.7B sec.
bimodalSkewThresholdSec = 3600.0
// maxPlausibleSkewJumpSec is the largest skew change between
// consecutive samples that we treat as physical drift. Anything larger
// (e.g. a GPS sync that jumps the clock by minutes/days) is rejected
// as an outlier when computing drift. Real microcontroller drift is
// fractions of a second per advert; 60s is a generous safety factor.
maxPlausibleSkewJumpSec = 60.0
// theilSenMaxPoints caps the number of points fed to Theil-Sen
// regression (O(n²) in pairs). For nodes with thousands of samples we
// keep the most-recent points, which are also the most relevant for
// current drift.
theilSenMaxPoints = 200
)
// classifySkew maps absolute skew (seconds) to a severity level.
@@ -109,7 +76,6 @@ type NodeClockSkew struct {
MeanSkewSec float64 `json:"meanSkewSec"` // corrected mean skew (positive = node ahead)
MedianSkewSec float64 `json:"medianSkewSec"` // corrected median skew
LastSkewSec float64 `json:"lastSkewSec"` // most recent corrected skew
RecentMedianSkewSec float64 `json:"recentMedianSkewSec"` // median across most-recent samples (drives severity, see #789)
DriftPerDaySec float64 `json:"driftPerDaySec"` // linear drift rate (sec/day)
Severity SkewSeverity `json:"severity"`
SampleCount int `json:"sampleCount"`
@@ -117,9 +83,6 @@ type NodeClockSkew struct {
LastAdvertTS int64 `json:"lastAdvertTS"` // most recent advert timestamp
LastObservedTS int64 `json:"lastObservedTS"` // most recent observation timestamp
Samples []SkewSample `json:"samples,omitempty"` // time-series for sparklines
GoodFraction float64 `json:"goodFraction"` // fraction of recent samples with |skew| <= 1h
RecentBadSampleCount int `json:"recentBadSampleCount"` // count of recent samples with |skew| > 1h
RecentSampleCount int `json:"recentSampleCount"` // total recent samples in window
NodeName string `json:"nodeName,omitempty"` // populated in fleet responses
NodeRole string `json:"nodeRole,omitempty"` // populated in fleet responses
}
@@ -456,95 +419,12 @@ func (s *PacketStore) getNodeClockSkewLocked(pubkey string) *NodeClockSkew {
medSkew := median(allSkews)
meanSkew := mean(allSkews)
absMedian := math.Abs(medSkew)
severity := classifySkew(absMedian)
// Severity is derived from RECENT samples only (issue #789). The
// all-time median is poisoned by historical bad data — a node that
// was off for hours and then GPS-corrected can have median = -59M sec
// while its current skew is -0.8s. Operators need severity to reflect
// current health, so they trust the dashboard.
//
// Sort tsSkews by time and take the last recentSkewWindowCount samples
// (or all samples within recentSkewWindowSec of the latest, whichever
// gives FEWER samples — we want the more-current view; a chatty node
// can fit dozens of samples in 1h, in which case the count cap wins).
sort.Slice(tsSkews, func(i, j int) bool { return tsSkews[i].ts < tsSkews[j].ts })
recentSkew := lastSkew
var recentVals []float64
if n := len(tsSkews); n > 0 {
latestTS := tsSkews[n-1].ts
// Index-based window: last K samples.
startByCount := n - recentSkewWindowCount
if startByCount < 0 {
startByCount = 0
}
// Time-based window: samples newer than latestTS - windowSec.
startByTime := n - 1
for i := n - 1; i >= 0; i-- {
if latestTS-tsSkews[i].ts <= recentSkewWindowSec {
startByTime = i
} else {
break
}
}
// Pick the narrower (larger-index) of the two windows — the most
// current view of the node's clock health.
start := startByCount
if startByTime > start {
start = startByTime
}
recentVals = make([]float64, 0, n-start)
for i := start; i < n; i++ {
recentVals = append(recentVals, tsSkews[i].skew)
}
if len(recentVals) > 0 {
recentSkew = median(recentVals)
}
}
// ── Bimodal detection (#845) ─────────────────────────────────────────
// Split recent samples into "good" (|skew| <= 1h, real clock) and
// "bad" (|skew| > 1h, firmware nonsense from uninitialized RTC).
// Classification order (first match wins):
// no_clock — goodFraction < 0.10 (essentially no real clock)
// bimodal_clock — 0.10 <= goodFraction < 0.80 AND badCount > 0
// ok/warn/etc. — goodFraction >= 0.80 (normal, outliers filtered)
var goodSamples []float64
for _, v := range recentVals {
if math.Abs(v) <= bimodalSkewThresholdSec {
goodSamples = append(goodSamples, v)
}
}
recentSampleCount := len(recentVals)
recentBadCount := recentSampleCount - len(goodSamples)
var goodFraction float64
if recentSampleCount > 0 {
goodFraction = float64(len(goodSamples)) / float64(recentSampleCount)
}
var severity SkewSeverity
if goodFraction < 0.10 {
// Essentially no real clock — classify as no_clock regardless
// of the raw skew magnitude.
severity = SkewNoClock
} else if goodFraction < 0.80 && recentBadCount > 0 {
// Bimodal: use median of GOOD samples as the "real" skew.
severity = SkewBimodalClock
if len(goodSamples) > 0 {
recentSkew = median(goodSamples)
}
} else {
// Normal path: if there are good samples, use their median
// (filters out rare outliers in ≥80% good case).
if len(goodSamples) > 0 && recentBadCount > 0 {
recentSkew = median(goodSamples)
}
severity = classifySkew(math.Abs(recentSkew))
}
// For no_clock / bimodal_clock nodes, skip drift when data is unreliable.
// For no_clock nodes (uninitialized RTC), skip drift — data is meaningless.
var drift float64
if severity != SkewNoClock && severity != SkewBimodalClock && len(tsSkews) >= minDriftSamples {
if severity != SkewNoClock && len(tsSkews) >= minDriftSamples {
drift = computeDrift(tsSkews)
// Cap physically impossible drift rates.
if math.Abs(drift) > maxReasonableDriftPerDay {
@@ -552,28 +432,25 @@ func (s *PacketStore) getNodeClockSkewLocked(pubkey string) *NodeClockSkew {
}
}
// Build sparkline samples from tsSkews (already sorted by time above).
// Build sparkline samples from tsSkews (sorted by time).
sort.Slice(tsSkews, func(i, j int) bool { return tsSkews[i].ts < tsSkews[j].ts })
samples := make([]SkewSample, len(tsSkews))
for i, p := range tsSkews {
samples[i] = SkewSample{Timestamp: p.ts, SkewSec: round(p.skew, 1)}
}
return &NodeClockSkew{
Pubkey: pubkey,
MeanSkewSec: round(meanSkew, 1),
MedianSkewSec: round(medSkew, 1),
LastSkewSec: round(lastSkew, 1),
RecentMedianSkewSec: round(recentSkew, 1),
DriftPerDaySec: round(drift, 2),
Severity: severity,
SampleCount: totalSamples,
Calibrated: anyCal,
LastAdvertTS: lastAdvTS,
LastObservedTS: lastObsTS,
Samples: samples,
GoodFraction: round(goodFraction, 2),
RecentBadSampleCount: recentBadCount,
RecentSampleCount: recentSampleCount,
Pubkey: pubkey,
MeanSkewSec: round(meanSkew, 1),
MedianSkewSec: round(medSkew, 1),
LastSkewSec: round(lastSkew, 1),
DriftPerDaySec: round(drift, 2),
Severity: severity,
SampleCount: totalSamples,
Calibrated: anyCal,
LastAdvertTS: lastAdvTS,
LastObservedTS: lastObsTS,
Samples: samples,
}
}
@@ -667,18 +544,7 @@ type tsSkewPair struct {
}
// computeDrift estimates linear drift in seconds per day from time-ordered
// (timestamp, skew) pairs. Issue #789: a single GPS-correction event (huge
// skew jump in seconds) used to dominate ordinary least squares and produce
// absurd drift like 1.7M sec/day. We now:
//
// 1. Drop pairs whose consecutive skew jump exceeds maxPlausibleSkewJumpSec
// (clock corrections, not physical drift). This protects both OLS-style
// consumers and Theil-Sen.
// 2. Use Theil-Sen regression — the slope is the median of all pairwise
// slopes, naturally robust to remaining outliers (breakdown point ~29%).
//
// For very small samples after filtering we fall back to a simple slope
// between first and last calibrated samples.
// (timestamp, skew) pairs using simple linear regression.
func computeDrift(pairs []tsSkewPair) float64 {
if len(pairs) < 2 {
return 0
@@ -694,55 +560,21 @@ func computeDrift(pairs []tsSkewPair) float64 {
return 0
}
// Outlier filter: drop samples where the skew jumps more than
// maxPlausibleSkewJumpSec from the running "stable" baseline.
// We anchor on the first sample, then accept each subsequent point
// that's within the threshold of the most recent accepted point —
// this preserves a slow drift while rejecting correction events.
filtered := make([]tsSkewPair, 0, len(pairs))
filtered = append(filtered, pairs[0])
for i := 1; i < len(pairs); i++ {
prev := filtered[len(filtered)-1]
if math.Abs(pairs[i].skew-prev.skew) <= maxPlausibleSkewJumpSec {
filtered = append(filtered, pairs[i])
}
// Simple linear regression: skew = a + b*t
n := float64(len(pairs))
var sumX, sumY, sumXY, sumX2 float64
for _, p := range pairs {
x := float64(p.ts - pairs[0].ts) // normalize to avoid large numbers
y := p.skew
sumX += x
sumY += y
sumXY += x * y
sumX2 += x * x
}
// If the filter killed too much (e.g. unstable node), fall back to the
// raw series so we at least produce *something* — it'll be capped by
// maxReasonableDriftPerDay downstream.
if len(filtered) < 2 || float64(filtered[len(filtered)-1].ts-filtered[0].ts) < 3600 {
filtered = pairs
}
// Cap point count for Theil-Sen (O(n²) on pairs). Keep most-recent.
if len(filtered) > theilSenMaxPoints {
filtered = filtered[len(filtered)-theilSenMaxPoints:]
}
return theilSenSlope(filtered) * 86400 // sec/sec → sec/day
}
// theilSenSlope returns the Theil-Sen estimator: median of all pairwise
// slopes (yj - yi) / (tj - ti) for i < j. Naturally robust to outliers.
// Pairs must be sorted by timestamp ascending.
func theilSenSlope(pairs []tsSkewPair) float64 {
n := len(pairs)
if n < 2 {
denom := n*sumX2 - sumX*sumX
if denom == 0 {
return 0
}
// Pre-allocate: n*(n-1)/2 pairs.
slopes := make([]float64, 0, n*(n-1)/2)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
dt := float64(pairs[j].ts - pairs[i].ts)
if dt <= 0 {
continue
}
slopes = append(slopes, (pairs[j].skew-pairs[i].skew)/dt)
}
}
if len(slopes) == 0 {
return 0
}
return median(slopes)
slope := (n*sumXY - sumX*sumY) / denom // seconds of drift per second
return slope * 86400 // convert to seconds per day
}
-410
View File
@@ -544,413 +544,3 @@ func TestGetNodeClockSkew_NormalNodeWithDrift(t *testing.T) {
// formatInt64 renders an int64 as its base-10 decimal string.
func formatInt64(n int64) string {
	return fmt.Sprint(n)
}
// ── #789: Recent-window severity & robust drift ───────────────────────────────
// TestSeverityUsesRecentNotMedian (#789): a long run of bad historical
// samples (skew=-60s, ~5 min apart) followed by five fresh healthy ones
// (skew=-1s). The all-time median stays large, but severity must track
// the recent window, not the poisoned history.
func TestSeverityUsesRecentNotMedian(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	txs := make([]*StoreTx, 0, 105)
	for i := 0; i < 105; i++ {
		observed := t0 + int64(i)*300 // samples 5 min apart
		skew := int64(-60)
		if i >= 100 {
			skew = -1 // the last five samples are healthy
		}
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("recent-h%03d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed+skew),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["RECENT"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("RECENT")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewOK {
		t.Errorf("severity = %v, want ok (recent samples are healthy)", r.Severity)
	}
	if math.Abs(r.RecentMedianSkewSec) > 5 {
		t.Errorf("recentMedianSkewSec = %v, want ~-1", r.RecentMedianSkewSec)
	}
	// Historical median should still be retained for context.
	if math.Abs(r.MedianSkewSec) < 30 {
		t.Errorf("medianSkewSec = %v, expected historical median to remain large", r.MedianSkewSec)
	}
}
// TestDriftRejectsCorrectionJump: ~55 minutes of clean linear drift, then
// a single 1000-second skew jump (a clock-correction event, clearly an
// outlier). The pre-jump slope should win — drift must not be
// catastrophically inflated by the correction.
func TestDriftRejectsCorrectionJump(t *testing.T) {
	var pairs []tsSkewPair
	// Twelve samples, 5 min apart, drifting ~12 sec/day (1s per 7200s).
	for step := 0; step < 12; step++ {
		pairs = append(pairs, tsSkewPair{
			ts:   int64(step) * 300,
			skew: float64(step) * (1.0 / 24.0), // ~0.04s per 5-min step → 12 s/day
		})
	}
	// Roughly an hour later, the correction jump lands.
	pairs = append(pairs, tsSkewPair{ts: 3600 + 12*300, skew: 1000})
	drift := computeDrift(pairs)
	// Without rejection this would be ~ (1000-0)/(end-0) * 86400 = enormous.
	if math.Abs(drift) > 100 {
		t.Errorf("drift = %v, expected small (~12 s/day), correction jump should be filtered", drift)
	}
}
// TestTheilSenMatchesOLSWhenClean: on noiseless linear data the robust
// Theil-Sen estimate should agree with the ordinary least-squares answer.
func TestTheilSenMatchesOLSWhenClean(t *testing.T) {
	// 1 sec of drift per hour = 24 sec/day, sampled 20 times every 10 min.
	pairs := make([]tsSkewPair, 0, 20)
	for i := 0; i < 20; i++ {
		pairs = append(pairs, tsSkewPair{
			ts:   int64(i) * 600,
			skew: float64(i) * (600.0 / 3600.0),
		})
	}
	drift := computeDrift(pairs)
	if math.Abs(drift-24.0) > 0.25 { // ~1%
		t.Errorf("drift = %v, want ~24", drift)
	}
}
// TestReporterScenario_789: replay the exact field report from issue #789.
// The reporter saw mean=-52565156, median=-59063561, last=-0.8, sample
// count 1662, drift +1793549.9 s/day, severity=absurd. After the fix,
// severity must be ok (recent samples are healthy) and drift must be sane.
func TestReporterScenario_789(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	txs := make([]*StoreTx, 0, 1662)
	// 1657 samples poisoned by a ~-683-day skew, then 5 freshly corrected
	// samples at -1s (reporter saw -0.8) — 1662 in total.
	for i := 0; i < 1662; i++ {
		observed := t0 + int64(i)*60 // 1 min apart
		skew := int64(-59063561)     // ~ -683 days
		if i >= 1657 {
			skew = -1 // corrected tail
		}
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("rep-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed+skew),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["REPNODE"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("REPNODE")
	if r == nil {
		t.Fatal("nil result")
	}
	// Severity must reflect current health, not the all-time median.
	if r.Severity != SkewOK && r.Severity != SkewWarning {
		t.Errorf("severity = %v, want ok/warning (recent samples are healthy)", r.Severity)
	}
	if math.Abs(r.RecentMedianSkewSec) > 5 {
		t.Errorf("recentMedianSkewSec = %v, want near 0", r.RecentMedianSkewSec)
	}
	// The historical correction is a single jump between the 1657th and
	// 1658th sample; outlier rejection must keep drift within the cap…
	if math.Abs(r.DriftPerDaySec) > maxReasonableDriftPerDay {
		t.Errorf("drift = %v, must be <= cap %v", r.DriftPerDaySec, maxReasonableDriftPerDay)
	}
	// …and in fact near zero (both plateaus are flat).
	if math.Abs(r.DriftPerDaySec) > 1000 {
		t.Errorf("drift = %v, expected near zero after outlier rejection", r.DriftPerDaySec)
	}
	// Historical median is preserved as context.
	if math.Abs(r.MedianSkewSec) < 1e6 {
		t.Errorf("medianSkewSec = %v, expected historical poison preserved as context", r.MedianSkewSec)
	}
}
// TestBimodalClock_845: 60% good / 40% bad samples must classify as
// bimodal_clock, with the recent median computed from the good samples only.
func TestBimodalClock_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	// 6 good samples (-5s) interleaved with 4 bad ones (-50000000s) so the
	// recent window (last 5) sees both modes.
	skews := []int64{-5, -5, -50000000, -5, -50000000, -5, -50000000, -5, -50000000, -5}
	txs := make([]*StoreTx, 0, len(skews))
	for i, skew := range skews {
		observed := t0 + int64(i)*60
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("bimodal-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed+skew),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["BIMODAL"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("BIMODAL")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewBimodalClock {
		t.Errorf("severity = %v, want bimodal_clock", r.Severity)
	}
	if math.Abs(r.RecentMedianSkewSec-(-5)) > 1 {
		t.Errorf("recentMedianSkewSec = %v, want ≈ -5 (median of good samples)", r.RecentMedianSkewSec)
	}
	if r.GoodFraction < 0.5 || r.GoodFraction > 0.7 {
		t.Errorf("goodFraction = %v, want ~0.6", r.GoodFraction)
	}
	if r.RecentBadSampleCount < 1 {
		t.Errorf("recentBadSampleCount = %v, want > 0", r.RecentBadSampleCount)
	}
}
// TestAllBad_NoClock_845: every sample carries an absurd skew, so the node
// must classify as no_clock (uninitialized RTC).
func TestAllBad_NoClock_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	txs := make([]*StoreTx, 0, 10)
	for i := 0; i < 10; i++ {
		observed := t0 + int64(i)*60
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("allbad-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed-50000000),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["ALLBAD"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("ALLBAD")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewNoClock {
		t.Errorf("severity = %v, want no_clock", r.Severity)
	}
}
// TestMostlyGood_OK_845: 90% good samples with a single huge outlier — the
// outlier is filtered and severity stays ok.
func TestMostlyGood_OK_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	// 9 good samples at -5s, 1 bad at -50000000s.
	txs := make([]*StoreTx, 0, 10)
	for i := 0; i < 10; i++ {
		observed := t0 + int64(i)*60
		skew := int64(-5)
		if i == 9 {
			skew = -50000000 // the lone bad sample
		}
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("mostly-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed+skew),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["MOSTLY"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("MOSTLY")
	if r == nil {
		t.Fatal("nil result")
	}
	// 90% good → normal classification path, median of good samples = -5s → ok
	if r.Severity != SkewOK {
		t.Errorf("severity = %v, want ok", r.Severity)
	}
	if math.Abs(r.RecentMedianSkewSec-(-5)) > 1 {
		t.Errorf("recentMedianSkewSec = %v, want ≈ -5", r.RecentMedianSkewSec)
	}
}
// TestSingleSample_845: a single healthy sample classifies as ok with a
// good fraction of exactly 1.0.
func TestSingleSample_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	observed := int64(1700000000)
	tx := &StoreTx{
		Hash:        "single-0001",
		PayloadType: &payloadType,
		DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed-30), // 30s skew
		Observations: []*StoreObs{
			{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
		},
	}
	store.mu.Lock()
	store.byNode["SINGLE"] = []*StoreTx{tx}
	store.byPayloadType[4] = append(store.byPayloadType[4], tx)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("SINGLE")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewOK {
		t.Errorf("severity = %v, want ok", r.Severity)
	}
	if r.RecentSampleCount != 1 {
		t.Errorf("recentSampleCount = %d, want 1", r.RecentSampleCount)
	}
	if r.GoodFraction != 1.0 {
		t.Errorf("goodFraction = %v, want 1.0", r.GoodFraction)
	}
}
// TestFiftyFifty_Bimodal_845: an even good/bad split must classify as
// bimodal_clock with a good fraction near 0.5.
func TestFiftyFifty_Bimodal_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	txs := make([]*StoreTx, 0, 10)
	for i := 0; i < 10; i++ {
		observed := t0 + int64(i)*60
		skew := int64(-50000000) // odd indices: firmware nonsense
		if i%2 == 0 {
			skew = -10 // even indices: real clock
		}
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("fifty-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed+skew),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["FIFTY"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("FIFTY")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewBimodalClock {
		t.Errorf("severity = %v, want bimodal_clock", r.Severity)
	}
	if r.GoodFraction < 0.4 || r.GoodFraction > 0.6 {
		t.Errorf("goodFraction = %v, want ~0.5", r.GoodFraction)
	}
}
// TestAllGood_OK_845: uniformly healthy samples classify as ok — no
// bimodal flag, no bad-sample count.
func TestAllGood_OK_845(t *testing.T) {
	store := NewPacketStore(nil, nil)
	payloadType := 4
	t0 := int64(1700000000)
	txs := make([]*StoreTx, 0, 10)
	for i := 0; i < 10; i++ {
		observed := t0 + int64(i)*60
		txs = append(txs, &StoreTx{
			Hash:        fmt.Sprintf("allgood-%04d", i),
			PayloadType: &payloadType,
			DecodedJSON: fmt.Sprintf(`{"payload":{"timestamp":%d}}`, observed-3),
			Observations: []*StoreObs{
				{ObserverID: "obs1", Timestamp: time.Unix(observed, 0).UTC().Format(time.RFC3339)},
			},
		})
	}
	store.mu.Lock()
	store.byNode["ALLGOOD"] = txs
	store.byPayloadType[4] = append(store.byPayloadType[4], txs...)
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()
	r := store.GetNodeClockSkew("ALLGOOD")
	if r == nil {
		t.Fatal("nil result")
	}
	if r.Severity != SkewOK {
		t.Errorf("severity = %v, want ok", r.Severity)
	}
	if r.GoodFraction != 1.0 {
		t.Errorf("goodFraction = %v, want 1.0", r.GoodFraction)
	}
	if r.RecentBadSampleCount != 0 {
		t.Errorf("recentBadSampleCount = %v, want 0", r.RecentBadSampleCount)
	}
}
@@ -1,403 +0,0 @@
package main
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
// TestConcurrentIngestAndEviction exercises the race between IngestNewFromDB
// adding packets (via direct store manipulation simulating the locked section)
// and RunEviction removing packets. Without proper locking this would trigger
// the race detector and produce inconsistent index state.
//
// Intended to run under `go test -race`. Beyond the race-detector signal,
// the post-state assertions check that every index (byHash, byTxID,
// trackedBytes) stays consistent with the packets slice.
func TestConcurrentIngestAndEviction(t *testing.T) {
	// Seed store with 200 old packets that are eligible for eviction.
	// makeTestStore is a package-local fixture helper — assumes the third
	// argument is observations-per-packet; confirm against its definition.
	startTime := time.Now().UTC().Add(-48 * time.Hour)
	store := makeTestStore(200, startTime, 1)
	store.retentionHours = 24 // everything older than 24h is evictable
	store.loaded = true
	// Track bytes for all seeded packets so trackedBytes accounting starts
	// consistent with the seeded state.
	for _, tx := range store.packets {
		store.trackedBytes += estimateStoreTxBytes(tx)
		for _, obs := range tx.Observations {
			store.trackedBytes += estimateStoreObsBytes(obs)
		}
	}
	const numIngestGoroutines = 5
	const packetsPerGoroutine = 50
	const numEvictionGoroutines = 3
	var wg sync.WaitGroup
	var ingestedCount int64
	// Concurrent ingest: simulate what IngestNewFromDB does under the lock.
	// Each goroutine gets a disjoint txID/hash/pubkey namespace so no two
	// writers collide on the same key.
	for g := 0; g < numIngestGoroutines; g++ {
		wg.Add(1)
		go func(goroutineID int) {
			defer wg.Done()
			for i := 0; i < packetsPerGoroutine; i++ {
				txID := 1000 + goroutineID*1000 + i
				hash := fmt.Sprintf("new_hash_%d_%04d", goroutineID, i)
				pt := 5 // GRP_TXT
				ts := time.Now().UTC().Format(time.RFC3339)
				tx := &StoreTx{
					ID:          txID,
					Hash:        hash,
					FirstSeen:   ts,
					LatestSeen:  ts,
					PayloadType: &pt,
					DecodedJSON: fmt.Sprintf(`{"pubKey":"newpk_%d_%04d"}`, goroutineID, i),
					obsKeys:     make(map[string]bool),
					observerSet: make(map[string]bool),
				}
				obs := &StoreObs{
					ID:             txID*10 + 1,
					TransmissionID: txID,
					ObserverID:     fmt.Sprintf("obs_g%d", goroutineID),
					ObserverName:   fmt.Sprintf("Observer_g%d", goroutineID),
					Timestamp:      ts,
				}
				tx.Observations = append(tx.Observations, obs)
				tx.ObservationCount = 1
				// Acquire write lock (same as IngestNewFromDB). Every index
				// mutation below happens inside this single critical section.
				store.mu.Lock()
				store.packets = append(store.packets, tx)
				store.byHash[hash] = tx
				store.byTxID[txID] = tx
				store.byObsID[obs.ID] = obs
				store.byObserver[obs.ObserverID] = append(store.byObserver[obs.ObserverID], obs)
				store.byPayloadType[pt] = append(store.byPayloadType[pt], tx)
				pk := fmt.Sprintf("newpk_%d_%04d", goroutineID, i)
				if store.nodeHashes[pk] == nil {
					store.nodeHashes[pk] = make(map[string]bool)
				}
				store.nodeHashes[pk][hash] = true
				store.byNode[pk] = append(store.byNode[pk], tx)
				store.trackedBytes += estimateStoreTxBytes(tx)
				store.trackedBytes += estimateStoreObsBytes(obs)
				store.totalObs++
				store.mu.Unlock()
				atomic.AddInt64(&ingestedCount, 1)
			}
		}(g)
	}
	// Concurrent eviction goroutines — EvictStale is called with the write
	// lock held, interleaving with the ingest critical sections above.
	var evictedTotal int64
	for g := 0; g < numEvictionGoroutines; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 10; i++ {
				store.mu.Lock()
				n := store.EvictStale()
				store.mu.Unlock()
				atomic.AddInt64(&evictedTotal, int64(n))
				time.Sleep(time.Millisecond)
			}
		}()
	}
	// Concurrent readers (QueryPackets uses RLock)
	for g := 0; g < 3; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 20; i++ {
				store.mu.RLock()
				_ = len(store.packets)
				_ = len(store.byHash)
				store.mu.RUnlock()
				time.Sleep(500 * time.Microsecond)
			}
		}()
	}
	wg.Wait()
	// --- Post-state assertions ---
	store.mu.RLock()
	defer store.mu.RUnlock()
	totalIngested := int(atomic.LoadInt64(&ingestedCount))
	totalEvicted := int(atomic.LoadInt64(&evictedTotal))
	if totalIngested != numIngestGoroutines*packetsPerGoroutine {
		t.Fatalf("expected %d ingested, got %d", numIngestGoroutines*packetsPerGoroutine, totalIngested)
	}
	// Invariant: packets remaining = initial(200) + ingested - evicted
	expectedRemaining := 200 + totalIngested - totalEvicted
	if len(store.packets) != expectedRemaining {
		t.Fatalf("packets count mismatch: got %d, expected %d (200 + %d ingested - %d evicted)",
			len(store.packets), expectedRemaining, totalIngested, totalEvicted)
	}
	// Invariant: byHash must be consistent with packets slice
	if len(store.byHash) != len(store.packets) {
		t.Fatalf("byHash size %d != packets len %d", len(store.byHash), len(store.packets))
	}
	// Invariant: every packet in the slice must be in byHash
	for _, tx := range store.packets {
		if store.byHash[tx.Hash] != tx {
			t.Fatalf("packet %s in slice but not in byHash (or points to different tx)", tx.Hash)
		}
	}
	// Invariant: byTxID must map to packets in the slice
	byTxIDCount := 0
	for _, tx := range store.packets {
		if store.byTxID[tx.ID] == tx {
			byTxIDCount++
		}
	}
	if byTxIDCount != len(store.packets) {
		t.Fatalf("byTxID consistency: %d/%d packets found", byTxIDCount, len(store.packets))
	}
	// Invariant: trackedBytes must be non-negative
	if store.trackedBytes < 0 {
		t.Fatalf("trackedBytes went negative: %d", store.trackedBytes)
	}
	// Verify eviction actually happened (old packets were eligible)
	if totalEvicted == 0 {
		t.Fatal("expected some evictions to occur but got 0")
	}
	t.Logf("OK: ingested=%d, evicted=%d, remaining=%d, trackedBytes=%d",
		totalIngested, totalEvicted, len(store.packets), store.trackedBytes)
}
// TestConcurrentIngestNewObservationsAndEviction exercises the race between
// adding new observations to existing transmissions and eviction removing those
// same transmissions. This targets the IngestNewObservations path.
//
// Observation writers only touch the 50 recent packets; eviction should
// only remove the 50 old ones, so no observation ends up attached to an
// evicted transmission.
func TestConcurrentIngestNewObservationsAndEviction(t *testing.T) {
	// Create store with 100 packets, half old (evictable), half recent
	now := time.Now().UTC()
	store := makeTestStore(0, now, 1) // empty, we'll add manually
	store.retentionHours = 1
	// Add 50 old packets (2h ago) and 50 recent packets
	for i := 0; i < 100; i++ {
		var ts time.Time
		if i < 50 {
			ts = now.Add(-2 * time.Hour).Add(time.Duration(i) * time.Second)
		} else {
			ts = now.Add(-time.Duration(100-i) * time.Second)
		}
		hash := fmt.Sprintf("obs_hash_%04d", i)
		txID := i + 1
		pt := 4
		tx := &StoreTx{
			ID:          txID,
			Hash:        hash,
			FirstSeen:   ts.UTC().Format(time.RFC3339),
			LatestSeen:  ts.UTC().Format(time.RFC3339),
			PayloadType: &pt,
			DecodedJSON: fmt.Sprintf(`{"pubKey":"pk%04d"}`, i),
			obsKeys:     make(map[string]bool),
			observerSet: make(map[string]bool),
		}
		store.packets = append(store.packets, tx)
		store.byHash[hash] = tx
		store.byTxID[txID] = tx
		store.byPayloadType[pt] = append(store.byPayloadType[pt], tx)
		store.trackedBytes += estimateStoreTxBytes(tx)
	}
	store.loaded = true
	const numObsGoroutines = 4
	const obsPerGoroutine = 100
	var wg sync.WaitGroup
	var addedObs int64
	// Goroutines adding observations to RECENT packets (index 50-99)
	for g := 0; g < numObsGoroutines; g++ {
		wg.Add(1)
		go func(gID int) {
			defer wg.Done()
			for i := 0; i < obsPerGoroutine; i++ {
				targetIdx := 50 + (i % 50) // only target recent packets
				hash := fmt.Sprintf("obs_hash_%04d", targetIdx)
				store.mu.Lock()
				tx := store.byHash[hash]
				// tx may be nil if eviction won the race — that is the
				// scenario under test, so a miss is simply skipped.
				if tx != nil {
					obsID := 50000 + gID*10000 + i
					obs := &StoreObs{
						ID:             obsID,
						TransmissionID: tx.ID,
						ObserverID:     fmt.Sprintf("obs_new_%d", gID),
						ObserverName:   fmt.Sprintf("NewObs_%d", gID),
						Timestamp:      time.Now().UTC().Format(time.RFC3339),
					}
					dk := obs.ObserverID + "|"
					// NOTE(review): `|| true` deliberately defeats the
					// obsKeys dedup check so duplicate observations are
					// ingested for stress; `dk` is computed but has no
					// effect on the branch.
					if !tx.obsKeys[dk] || true { // allow duplicates for stress
						tx.Observations = append(tx.Observations, obs)
						tx.ObservationCount++
						store.byObsID[obsID] = obs
						store.byObserver[obs.ObserverID] = append(store.byObserver[obs.ObserverID], obs)
						store.trackedBytes += estimateStoreObsBytes(obs)
						store.totalObs++
						atomic.AddInt64(&addedObs, 1)
					}
				}
				store.mu.Unlock()
			}
		}(g)
	}
	// Concurrent eviction
	var evictedTotal int64
	for g := 0; g < 2; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 15; i++ {
				store.mu.Lock()
				n := store.EvictStale()
				store.mu.Unlock()
				atomic.AddInt64(&evictedTotal, int64(n))
				time.Sleep(500 * time.Microsecond)
			}
		}()
	}
	wg.Wait()
	// --- Assertions ---
	store.mu.RLock()
	defer store.mu.RUnlock()
	totalEvicted := int(atomic.LoadInt64(&evictedTotal))
	totalAdded := int(atomic.LoadInt64(&addedObs))
	// All 50 old packets should have been evicted
	if totalEvicted < 50 {
		t.Fatalf("expected at least 50 evictions (old packets), got %d", totalEvicted)
	}
	// Recent packets (50) should survive
	if len(store.packets) < 50 {
		t.Fatalf("expected at least 50 remaining packets (recent ones), got %d", len(store.packets))
	}
	// byHash consistency
	for _, tx := range store.packets {
		if store.byHash[tx.Hash] != tx {
			t.Fatalf("byHash inconsistency for %s", tx.Hash)
		}
	}
	// No evicted packet should remain in byHash
	for i := 0; i < 50; i++ {
		hash := fmt.Sprintf("obs_hash_%04d", i)
		if store.byHash[hash] != nil {
			t.Fatalf("evicted packet %s still in byHash", hash)
		}
	}
	// byObsID should not reference observations from evicted packets
	for obsID, obs := range store.byObsID {
		if store.byTxID[obs.TransmissionID] == nil {
			t.Fatalf("byObsID[%d] references evicted transmission %d", obsID, obs.TransmissionID)
		}
	}
	// trackedBytes non-negative
	if store.trackedBytes < 0 {
		t.Fatalf("trackedBytes negative: %d", store.trackedBytes)
	}
	t.Logf("OK: evicted=%d, added_obs=%d, remaining=%d, trackedBytes=%d",
		totalEvicted, totalAdded, len(store.packets), store.trackedBytes)
}
// TestConcurrentRunEvictionWithReads exercises RunEviction's two-phase locking
// against concurrent read operations (simulating QueryPackets / GetStoreStats).
// Without proper RWMutex usage, this would race on slice/map reads.
func TestConcurrentRunEvictionWithReads(t *testing.T) {
	// Seed 500 packets, all 1-3h old, with a 1h retention — most are evictable.
	startTime := time.Now().UTC().Add(-3 * time.Hour)
	store := makeTestStore(500, startTime, 1)
	store.retentionHours = 1
	store.loaded = true
	// Initialize trackedBytes to match the seeded packets and observations.
	for _, tx := range store.packets {
		store.trackedBytes += estimateStoreTxBytes(tx)
		for _, obs := range tx.Observations {
			store.trackedBytes += estimateStoreObsBytes(obs)
		}
	}
	var wg sync.WaitGroup
	// Multiple RunEviction calls (uses its own locking)
	var evicted int64
	for g := 0; g < 3; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n := store.RunEviction()
			atomic.AddInt64(&evicted, int64(n))
		}()
	}
	// Concurrent readers using the public read-lock pattern
	var readCount int64
	for g := 0; g < 5; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 50; i++ {
				store.mu.RLock()
				count := len(store.packets)
				_ = count
				// Iterate a portion of byHash (simulating query)
				for hash, tx := range store.byHash {
					_ = hash
					_ = tx.ObservationCount
					break // just access one
				}
				store.mu.RUnlock()
				atomic.AddInt64(&readCount, 1)
			}
		}()
	}
	wg.Wait()
	store.mu.RLock()
	defer store.mu.RUnlock()
	totalEvicted := int(atomic.LoadInt64(&evicted))
	// Must have evicted packets older than 1h (most of the 500 are 1-3h old)
	if totalEvicted == 0 {
		t.Fatal("expected evictions but got 0")
	}
	// Consistency: byHash == packets len
	if len(store.byHash) != len(store.packets) {
		t.Fatalf("byHash %d != packets %d after concurrent RunEviction+reads",
			len(store.byHash), len(store.packets))
	}
	// All reads completed without panic (5 readers × 50 iterations = 250)
	if atomic.LoadInt64(&readCount) != 250 {
		t.Fatalf("not all reads completed: %d/250", atomic.LoadInt64(&readCount))
	}
	t.Logf("OK: evicted=%d, remaining=%d, reads=%d",
		totalEvicted, len(store.packets), atomic.LoadInt64(&readCount))
}
+1 -2
View File
@@ -115,8 +115,7 @@ type NeighborGraphConfig struct {
// PacketStoreConfig controls in-memory packet store limits.
type PacketStoreConfig struct {
RetentionHours float64 `json:"retentionHours"` // max age of packets in hours (0 = unlimited)
MaxMemoryMB int `json:"maxMemoryMB"` // hard memory ceiling in MB (0 = unlimited)
MaxResolvedPubkeyIndexEntries int `json:"maxResolvedPubkeyIndexEntries"` // warning threshold for index size (0 = 5M default)
MaxMemoryMB int `json:"maxMemoryMB"` // hard memory ceiling in MB (0 = unlimited)
}
// GeoFilterConfig is an alias for the shared geofilter.Config type.
+142 -56
View File
@@ -47,7 +47,7 @@ func setupTestDBv2(t *testing.T) *DB {
id INTEGER PRIMARY KEY AUTOINCREMENT,
transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
observer_id TEXT, observer_name TEXT, direction TEXT,
snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp INTEGER NOT NULL, raw_hex TEXT
snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp INTEGER NOT NULL
);
`
if _, err := conn.Exec(schema); err != nil {
@@ -585,15 +585,12 @@ func TestHandlePacketsMultiNodeWithStore(t *testing.T) {
func TestHandlePacketDetailNoStore(t *testing.T) {
_, router := setupNoStoreServer(t)
// With no in-memory store, handlePacketDetail now falls back to the DB
// (#827). The seeded transmissions are present in the DB, so by-hash and
// by-ID lookups succeed; only truly absent IDs return 404.
t.Run("by hash", func(t *testing.T) {
req := httptest.NewRequest("GET", "/api/packets/abc123def4567890", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != 200 {
t.Fatalf("expected 200 (DB fallback), got %d: %s", w.Code, w.Body.String())
if w.Code != 404 {
t.Fatalf("expected 404 (no store), got %d: %s", w.Code, w.Body.String())
}
})
@@ -601,8 +598,8 @@ func TestHandlePacketDetailNoStore(t *testing.T) {
req := httptest.NewRequest("GET", "/api/packets/1", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != 200 {
t.Fatalf("expected 200 (DB fallback), got %d: %s", w.Code, w.Body.String())
if w.Code != 404 {
t.Fatalf("expected 404 (no store), got %d: %s", w.Code, w.Body.String())
}
})
@@ -763,9 +760,9 @@ func TestGetChannelsFromStore(t *testing.T) {
func TestPrefixMapResolve(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aabbccdd11223344", Name: "NodeA", HasGPS: true, Lat: 37.5, Lon: -122.0},
{Role: "repeater", PublicKey: "aabbccdd55667788", Name: "NodeB", HasGPS: false},
{Role: "repeater", PublicKey: "eeff0011aabbccdd", Name: "NodeC", HasGPS: true, Lat: 38.0, Lon: -121.0},
{PublicKey: "aabbccdd11223344", Name: "NodeA", HasGPS: true, Lat: 37.5, Lon: -122.0},
{PublicKey: "aabbccdd55667788", Name: "NodeB", HasGPS: false},
{PublicKey: "eeff0011aabbccdd", Name: "NodeC", HasGPS: true, Lat: 38.0, Lon: -121.0},
}
pm := buildPrefixMap(nodes)
@@ -805,8 +802,8 @@ func TestPrefixMapResolve(t *testing.T) {
t.Run("multiple candidates no GPS", func(t *testing.T) {
noGPSNodes := []nodeInfo{
{Role: "repeater", PublicKey: "aa11bb22", Name: "X", HasGPS: false},
{Role: "repeater", PublicKey: "aa11cc33", Name: "Y", HasGPS: false},
{PublicKey: "aa11bb22", Name: "X", HasGPS: false},
{PublicKey: "aa11cc33", Name: "Y", HasGPS: false},
}
pm2 := buildPrefixMap(noGPSNodes)
n := pm2.resolve("aa11")
@@ -820,8 +817,8 @@ func TestPrefixMapResolve(t *testing.T) {
func TestPrefixMapCap(t *testing.T) {
// 16-char pubkey — longer than maxPrefixLen
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aabbccdd11223344", Name: "LongKey"},
{Role: "repeater", PublicKey: "eeff0011", Name: "ShortKey"}, // exactly 8 chars
{PublicKey: "aabbccdd11223344", Name: "LongKey"},
{PublicKey: "eeff0011", Name: "ShortKey"}, // exactly 8 chars
}
pm := buildPrefixMap(nodes)
@@ -2148,6 +2145,13 @@ func setupRichTestDB(t *testing.T) *DB {
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
VALUES (5, 1, 14.0, -88, '["aa"]', ?)`, recentEpoch)
// Extra packet sharing subpath "eeff,0011" with hash_with_path_02 above,
// so that subpath has count>=2 and survives singleton pruning.
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
VALUES ('0140eeff0011', 'hash_shared_subpath', ?, 1, 4, '{"pubKey":"eeff001199887766","name":"TestShared","type":"ADVERT"}')`, recent)
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
VALUES (6, 1, 9.0, -92, '["eeff","0011"]', ?)`, recentEpoch)
return db
}
@@ -2279,14 +2283,11 @@ func TestSubpathPrecomputedIndex(t *testing.T) {
t.Fatal("expected spTotalPaths > 0 after Load()")
}
// The rich test DB has paths ["aa","bb"], ["aabb","ccdd"], and
// ["eeff","0011","2233"]. That yields 5 unique raw subpaths.
// The rich test DB has paths ["aa","bb"], ["aabb","ccdd"],
// ["eeff","0011","2233"], and ["eeff","0011"]. After singleton pruning,
// only subpaths with count>=2 survive. "eeff,0011" appears in two packets.
expectedRaw := map[string]int{
"aa,bb": 1,
"aabb,ccdd": 1,
"eeff,0011": 1,
"0011,2233": 1,
"eeff,0011,2233": 1,
"eeff,0011": 2,
}
for key, want := range expectedRaw {
got, ok := store.spIndex[key]
@@ -2296,8 +2297,16 @@ func TestSubpathPrecomputedIndex(t *testing.T) {
t.Errorf("spIndex[%q] = %d, want %d", key, got, want)
}
}
if store.spTotalPaths != 3 {
t.Errorf("spTotalPaths = %d, want 3", store.spTotalPaths)
// Singleton subpaths must have been pruned
singletons := []string{"aa,bb", "aabb,ccdd", "0011,2233", "eeff,0011,2233"}
for _, key := range singletons {
if _, ok := store.spIndex[key]; ok {
t.Errorf("expected singleton spIndex[%q] to be pruned", key)
}
}
if store.spTotalPaths != 4 {
t.Errorf("spTotalPaths = %d, want 4", store.spTotalPaths)
}
// Fast-path (no region) and slow-path (with region) must return the
@@ -2325,31 +2334,19 @@ func TestSubpathTxIndexPopulated(t *testing.T) {
store := NewPacketStore(db, nil)
store.Load()
// spTxIndex must be populated alongside spIndex
if len(store.spTxIndex) == 0 {
t.Fatal("expected spTxIndex to be populated after Load()")
// spIndex must be populated after Load()
if len(store.spIndex) == 0 {
t.Fatal("expected spIndex to be populated after Load()")
}
// Every key in spIndex must also exist in spTxIndex with matching count
for key, count := range store.spIndex {
txs, ok := store.spTxIndex[key]
if !ok {
t.Errorf("spTxIndex missing key %q that exists in spIndex", key)
continue
}
if len(txs) != count {
t.Errorf("spTxIndex[%q] has %d txs, spIndex count is %d", key, len(txs), count)
}
}
// GetSubpathDetail should return correct match count via indexed lookup
// GetSubpathDetail should return correct match count via scan fallback
detail := store.GetSubpathDetail([]string{"eeff", "0011"})
if detail == nil {
t.Fatal("expected non-nil detail for existing subpath")
}
matches, _ := detail["totalMatches"].(int)
if matches != 1 {
t.Errorf("totalMatches = %d, want 1", matches)
if matches != 2 {
t.Errorf("totalMatches = %d, want 2", matches)
}
// Non-existent subpath should return 0 matches
@@ -2397,6 +2394,55 @@ func TestSubpathDetailMixedCaseHops(t *testing.T) {
}
}
// TestSubpathSingletonDrop verifies that singleton entries are pruned from
// spIndex while count>=2 entries are preserved.
func TestSubpathSingletonDrop(t *testing.T) {
db := setupRichTestDB(t)
defer db.Close()
store := NewPacketStore(db, nil)
store.Load()
// "eeff,0011" appears in 2 packets — must survive singleton pruning
if count, ok := store.spIndex["eeff,0011"]; !ok {
t.Fatal("expected spIndex[\"eeff,0011\"] to survive singleton pruning")
} else if count != 2 {
t.Errorf("spIndex[\"eeff,0011\"] = %d, want 2", count)
}
// All count==1 entries must be gone
for key, count := range store.spIndex {
if count < 2 {
t.Errorf("spIndex[%q] = %d, singletons should have been pruned", key, count)
}
}
}
// TestSubpathEmptyDB verifies that the store loads successfully on a DB
// with no transmissions (no subpaths at all).
func TestSubpathEmptyDB(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
store := NewPacketStore(db, nil)
store.Load()
if len(store.spIndex) != 0 {
t.Errorf("expected empty spIndex on empty DB, got %d entries", len(store.spIndex))
}
if store.spTotalPaths != 0 {
t.Errorf("expected spTotalPaths=0 on empty DB, got %d", store.spTotalPaths)
}
// GetSubpathDetail should still work (return zero matches)
detail := store.GetSubpathDetail([]string{"aa", "bb"})
if detail == nil {
t.Fatal("expected non-nil detail even on empty DB")
}
matches, _ := detail["totalMatches"].(int)
if matches != 0 {
t.Errorf("totalMatches on empty DB = %d, want 0", matches)
}
}
func TestStoreGetAnalyticsRFCacheHit(t *testing.T) {
db := setupRichTestDB(t)
defer db.Close()
@@ -4319,48 +4365,88 @@ func TestIndexByNodePreCheck(t *testing.T) {
})
}
// TestIndexByNodeResolvedPath tests that indexByNode only indexes decoded JSON pubkeys.
// After #800, resolved_path entries are handled via the decode-window, not indexByNode.
// TestIndexByNodeResolvedPath tests that resolved_path entries are indexed in byNode.
func TestIndexByNodeResolvedPath(t *testing.T) {
store := &PacketStore{
byNode: make(map[string][]*StoreTx),
nodeHashes: make(map[string]map[string]bool),
}
t.Run("decoded JSON pubkeys still indexed", func(t *testing.T) {
pk := "aabb1122334455ff"
t.Run("indexes resolved path pubkeys from observations", func(t *testing.T) {
relayPK := "aabb1122334455ff"
tx := &StoreTx{
Hash: "rp1",
DecodedJSON: `{"pubKey":"` + pk + `"}`,
DecodedJSON: `{"type":"CHAN","text":"hello"}`, // no pubKey fields
Observations: []*StoreObs{
{ResolvedPath: []*string{&relayPK}},
},
}
store.indexByNode(tx)
if len(store.byNode[relayPK]) != 1 {
t.Errorf("expected relay pubkey indexed, got %d", len(store.byNode[relayPK]))
}
})
t.Run("skips null entries in resolved path", func(t *testing.T) {
pk := "cc11dd22ee33ff44"
tx := &StoreTx{
Hash: "rp2",
Observations: []*StoreObs{
{ResolvedPath: []*string{nil, &pk, nil}},
},
}
store.indexByNode(tx)
if len(store.byNode[pk]) != 1 {
t.Errorf("expected decoded pubkey indexed, got %d", len(store.byNode[pk]))
t.Errorf("expected resolved pubkey indexed, got %d", len(store.byNode[pk]))
}
// Verify nil entries didn't create empty-string keys
if _, exists := store.byNode[""]; exists {
t.Error("nil/empty resolved path entries should not create byNode entries")
}
})
t.Run("resolved path pubkeys NOT indexed by indexByNode", func(t *testing.T) {
// After #800, indexByNode only handles decoded JSON fields.
// Resolved path pubkeys are handled by the decode-window.
t.Run("relay-only node appears in byNode", func(t *testing.T) {
// A packet with no decoded pubkey fields, only a relay in resolved path
relayOnly := "relay0only0pubkey"
tx := &StoreTx{
Hash: "rp2",
DecodedJSON: `{"type":"CHAN","text":"hello"}`, // no pubKey fields
Hash: "rp3",
// No DecodedJSON at all — pure relay
Observations: []*StoreObs{
{ResolvedPath: []*string{&relayOnly}},
},
}
store.indexByNode(tx)
// No new entries expected since there are no decoded pubkeys
if len(store.byNode[relayOnly]) != 1 {
t.Errorf("expected relay-only node indexed, got %d", len(store.byNode[relayOnly]))
}
})
t.Run("dedup within decoded JSON", func(t *testing.T) {
t.Run("dedup between decoded JSON and resolved path", func(t *testing.T) {
pk := "dedup0test0pk1234"
tx := &StoreTx{
Hash: "rp4",
DecodedJSON: `{"pubKey":"` + pk + `","destPubKey":"` + pk + `"}`,
DecodedJSON: `{"pubKey":"` + pk + `"}`,
Observations: []*StoreObs{
{ResolvedPath: []*string{&pk}},
},
}
store.indexByNode(tx)
if len(store.byNode[pk]) != 1 {
t.Errorf("expected dedup to keep 1 entry, got %d", len(store.byNode[pk]))
}
})
t.Run("indexes tx.ResolvedPath when observations empty", func(t *testing.T) {
rpPK := "txlevel0resolved1"
tx := &StoreTx{
Hash: "rp5",
ResolvedPath: []*string{&rpPK},
}
store.indexByNode(tx)
if len(store.byNode[rpPK]) != 1 {
t.Errorf("expected tx-level resolved path indexed, got %d", len(store.byNode[rpPK]))
}
})
}
// BenchmarkIndexByNode measures indexByNode performance with and without pubkey
-24
View File
@@ -20,7 +20,6 @@ type DB struct {
path string // filesystem path to the database file
isV3 bool // v3 schema: observer_idx in observations (vs observer_id in v2)
hasResolvedPath bool // observations table has resolved_path column
hasObsRawHex bool // observations table has raw_hex column (#881)
// Channel list cache (60s TTL) — avoids repeated GROUP BY scans (#762)
channelsCacheMu sync.Mutex
@@ -77,9 +76,6 @@ func (db *DB) detectSchema() {
if colName == "resolved_path" {
db.hasResolvedPath = true
}
if colName == "raw_hex" {
db.hasObsRawHex = true
}
}
}
}
@@ -388,7 +384,6 @@ type PacketQuery struct {
Until string
Region string
Node string
Channel string // channel_hash filter (#812). Plain names like "#test"/"public" or "enc_<HEX>" for encrypted
Order string // ASC or DESC
ExpandObservations bool // when true, include observation sub-maps in txToMap output
}
@@ -625,11 +620,6 @@ func (db *DB) buildTransmissionWhere(q PacketQuery) ([]string, []interface{}) {
where = append(where, "t.decoded_json LIKE ?")
args = append(args, "%"+pk+"%")
}
if q.Channel != "" {
// channel_hash column is indexed for payload_type = 5; filter is exact match.
where = append(where, "t.channel_hash = ?")
args = append(args, q.Channel)
}
if q.Observer != "" {
ids := strings.Split(q.Observer, ",")
placeholders := strings.Repeat("?,", len(ids))
@@ -696,20 +686,6 @@ func (db *DB) GetPacketByHash(hash string) (map[string]interface{}, error) {
return nil, nil
}
// GetObservationsForHash returns all observations for the transmission with
// the given content hash. Used as a fallback by the packet-detail handler
// when the in-memory PacketStore has pruned the entry but the DB still has it.
func (db *DB) GetObservationsForHash(hash string) []map[string]interface{} {
var txID int
err := db.conn.QueryRow("SELECT id FROM transmissions WHERE hash = ?",
strings.ToLower(hash)).Scan(&txID)
if err != nil {
return nil
}
obsByTx := db.getObservationsForTransmissions([]int{txID})
return obsByTx[txID]
}
// GetNodes returns filtered, paginated node list.
func (db *DB) GetNodes(limit, offset int, role, search, before, lastHeard, sortBy, region string) ([]map[string]interface{}, int, map[string]int, error) {
+2 -60
View File
@@ -74,8 +74,7 @@ func setupTestDB(t *testing.T) *DB {
score INTEGER,
path_json TEXT,
timestamp INTEGER NOT NULL,
resolved_path TEXT,
raw_hex TEXT
resolved_path TEXT
);
CREATE TABLE IF NOT EXISTS observer_metrics (
@@ -1135,8 +1134,7 @@ func setupTestDBV2(t *testing.T) *DB {
rssi REAL,
score INTEGER,
path_json TEXT,
timestamp INTEGER NOT NULL,
raw_hex TEXT
timestamp INTEGER NOT NULL
);
`
if _, err := conn.Exec(schema); err != nil {
@@ -1977,59 +1975,3 @@ func TestParseWindowDuration(t *testing.T) {
}
}
}
// TestPerObservationRawHexEnrich verifies enrichObs returns per-observation raw_hex
// when available, falling back to transmission raw_hex when NULL (#881).
func TestPerObservationRawHexEnrich(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
// Insert observers
db.conn.Exec(`INSERT INTO observers (id, name) VALUES ('obs-a', 'Observer A')`)
db.conn.Exec(`INSERT INTO observers (id, name) VALUES ('obs-b', 'Observer B')`)
var rowA, rowB int64
db.conn.QueryRow(`SELECT rowid FROM observers WHERE id='obs-a'`).Scan(&rowA)
db.conn.QueryRow(`SELECT rowid FROM observers WHERE id='obs-b'`).Scan(&rowB)
// Insert transmission with raw_hex
txHex := "deadbeef"
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen) VALUES (?, 'hash1', '2026-04-21T10:00:00Z')`, txHex)
// Insert two observations: A has its own raw_hex, B has NULL (historical)
obsAHex := "c0ffee01"
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, raw_hex)
VALUES (1, ?, -5.0, -90.0, '[]', 1745236800, ?)`, rowA, obsAHex)
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
VALUES (1, ?, -3.0, -85.0, '["aabb"]', 1745236801)`, rowB)
store := NewPacketStore(db, nil)
if err := store.Load(); err != nil {
t.Fatalf("store load: %v", err)
}
tx := store.byHash["hash1"]
if tx == nil {
t.Fatal("transmission not loaded")
}
if len(tx.Observations) < 2 {
t.Fatalf("expected 2 observations, got %d", len(tx.Observations))
}
// Check enriched observations
for _, obs := range tx.Observations {
m := store.enrichObs(obs)
rh, _ := m["raw_hex"].(string)
if obs.RawHex != "" {
// Observer A: should get per-observation raw_hex
if rh != obsAHex {
t.Errorf("obs with own raw_hex: got %q, want %q", rh, obsAHex)
}
} else {
// Observer B: should fall back to transmission raw_hex
if rh != txHex {
t.Errorf("obs without raw_hex: got %q, want %q (tx fallback)", rh, txHex)
}
}
}
}
+101 -3
View File
@@ -10,7 +10,6 @@ import (
"strings"
"time"
"github.com/meshcore-analyzer/packetpath"
"github.com/meshcore-analyzer/sigvalidate"
)
@@ -165,9 +164,8 @@ func decodePath(pathByte byte, buf []byte, offset int) (Path, int) {
}, totalBytes
}
// isTransportRoute delegates to packetpath.IsTransportRoute.
func isTransportRoute(routeType int) bool {
return packetpath.IsTransportRoute(routeType)
return routeType == RouteTransportFlood || routeType == RouteTransportDirect
}
func decodeEncryptedPayload(typeName string, buf []byte) Payload {
@@ -443,6 +441,106 @@ func DecodePacket(hexString string, validateSignatures bool) (*DecodedPacket, er
}, nil
}
// HexRange represents a labeled byte range for the hex breakdown visualization.
// Start and End are byte indexes into the decoded packet and are both
// inclusive (a single-byte field has Start == End).
type HexRange struct {
	Start int    `json:"start"` // inclusive index of the first byte
	End   int    `json:"end"`   // inclusive index of the last byte
	Label string `json:"label"` // human-readable section name, e.g. "Header", "Path"
}

// Breakdown holds colored byte ranges returned by the packet detail endpoint.
// Ranges appear in packet order; bytes not covered by any range are unlabeled.
type Breakdown struct {
	Ranges []HexRange `json:"ranges"`
}
// BuildBreakdown computes labeled byte ranges for each section of a MeshCore packet.
// The returned ranges are consumed by createColoredHexDump() and buildHexLegend()
// in the frontend (public/app.js).
//
// Range offsets are byte indexes into the decoded packet; Start and End are
// both inclusive. Whitespace in the input hex string is ignored. Invalid hex,
// or a packet shorter than two bytes, yields an empty (but non-nil) range list.
// On truncated packets the ranges decoded so far are returned.
func BuildBreakdown(hexString string) *Breakdown {
	// Strip the whitespace a copy/pasted hex dump typically carries.
	cleaned := strings.NewReplacer(" ", "", "\n", "", "\r", "").Replace(hexString)
	buf, err := hex.DecodeString(cleaned)
	if err != nil || len(buf) < 2 {
		return &Breakdown{Ranges: []HexRange{}}
	}

	ranges := make([]HexRange, 0, 8)
	emit := func(start, end int, label string) {
		ranges = append(ranges, HexRange{Start: start, End: end, Label: label})
	}
	done := func() *Breakdown { return &Breakdown{Ranges: ranges} }

	// Byte 0 is always the header.
	emit(0, 0, "Header")
	pos := 1
	header := decodeHeader(buf[0])

	// Transport routes (TRANSPORT_FLOOD / TRANSPORT_DIRECT) carry 4 bytes of
	// transport codes immediately after the header.
	if isTransportRoute(header.RouteType) {
		if len(buf) < pos+4 {
			return done()
		}
		emit(pos, pos+3, "Transport Codes")
		pos += 4
	}
	if pos >= len(buf) {
		return done()
	}

	// Path-length byte: bits 7-6 encode hashSize-1, bits 5-0 the hop count.
	emit(pos, pos, "Path Length")
	pathByte := buf[pos]
	pos++
	hashSize := int(pathByte>>6) + 1
	hashCount := int(pathByte & 0x3F)
	pathBytes := hashSize * hashCount
	// Only label the path when at least one hop exists and it fits the buffer.
	if hashCount > 0 && pos+pathBytes <= len(buf) {
		emit(pos, pos+pathBytes-1, "Path")
	}
	pos += pathBytes
	if pos >= len(buf) {
		return done()
	}

	payloadStart := pos
	if header.PayloadType == PayloadADVERT && len(buf)-payloadStart >= 100 {
		// ADVERT payload layout: 32-byte pubkey, 4-byte timestamp, 64-byte
		// signature, then an optional app-data section gated by a flags byte.
		emit(payloadStart, payloadStart+31, "PubKey")
		emit(payloadStart+32, payloadStart+35, "Timestamp")
		emit(payloadStart+36, payloadStart+99, "Signature")
		appStart := payloadStart + 100
		if appStart < len(buf) {
			emit(appStart, appStart, "Flags")
			appFlags := buf[appStart]
			fOff := appStart + 1
			if appFlags&0x10 != 0 && fOff+8 <= len(buf) {
				emit(fOff, fOff+3, "Latitude")
				emit(fOff+4, fOff+7, "Longitude")
				fOff += 8
			}
			// Flags 0x20 and 0x40 each gate a 2-byte field that is skipped
			// over but intentionally left unlabeled.
			if appFlags&0x20 != 0 && fOff+2 <= len(buf) {
				fOff += 2
			}
			if appFlags&0x40 != 0 && fOff+2 <= len(buf) {
				fOff += 2
			}
			if appFlags&0x80 != 0 && fOff < len(buf) {
				emit(fOff, len(buf)-1, "Name")
			}
		}
	} else {
		// Non-ADVERT (or too-short ADVERT) payloads are one opaque range.
		emit(payloadStart, len(buf)-1, "Payload")
	}
	return done()
}
// ComputeContentHash computes the SHA-256-based content hash (first 16 hex chars).
// It hashes the payload-type nibble + payload (skipping path bytes) to produce a
// route-independent identifier for the same logical packet. For TRACE packets,
+140
View File
@@ -97,6 +97,146 @@ func TestDecodePacket_FloodHasNoCodes(t *testing.T) {
}
}
// Non-hex input must yield an empty range list rather than a panic or nil.
func TestBuildBreakdown_InvalidHex(t *testing.T) {
	if got := BuildBreakdown("not-hex!"); len(got.Ranges) != 0 {
		t.Errorf("expected empty ranges for invalid hex, got %d", len(got.Ranges))
	}
}
// A single-byte packet (no path byte) must yield an empty range list.
func TestBuildBreakdown_TooShort(t *testing.T) {
	if got := BuildBreakdown("11"); len(got.Ranges) != 0 {
		t.Errorf("expected empty ranges for too-short packet, got %d", len(got.Ranges))
	}
}
// TestBuildBreakdown_FloodNonAdvert verifies the range layout for a plain
// FLOOD packet with a non-ADVERT payload: Header, Path Length, Path, then a
// single undifferentiated Payload range.
func TestBuildBreakdown_FloodNonAdvert(t *testing.T) {
	// Header 0x15: route=1/FLOOD, payload=5/GRP_TXT
	// PathByte 0x01: 1 hop, 1-byte hash
	// PathHop: AA
	// Payload: FFFF00 (bytes 3-5; note the hex literal below ends in FFFF00,
	// not FF0011)
	b := BuildBreakdown("1501AAFFFF00")
	labels := rangeLabels(b.Ranges)
	expect := []string{"Header", "Path Length", "Path", "Payload"}
	if !equalLabels(labels, expect) {
		t.Errorf("expected labels %v, got %v", expect, labels)
	}
	// Verify byte positions
	assertRange(t, b.Ranges, "Header", 0, 0)
	assertRange(t, b.Ranges, "Path Length", 1, 1)
	assertRange(t, b.Ranges, "Path", 2, 2)
	assertRange(t, b.Ranges, "Payload", 3, 5)
}
// A TRANSPORT_FLOOD packet carries 4 transport-code bytes between the
// header and the path-length byte; every later range shifts by 4.
func TestBuildBreakdown_TransportFlood(t *testing.T) {
	// 0x14 = route 0 (TRANSPORT_FLOOD), payload 5 (GRP_TXT); then transport
	// codes AABBCCDD, path byte 0x01 (one 1-byte hop), hop EE, payload FF00.
	b := BuildBreakdown("14AABBCCDD01EEFF00")
	want := []struct {
		label      string
		start, end int
	}{
		{"Header", 0, 0},
		{"Transport Codes", 1, 4},
		{"Path Length", 5, 5},
		{"Path", 6, 6},
		{"Payload", 7, 8},
	}
	for _, w := range want {
		assertRange(t, b.Ranges, w.label, w.start, w.end)
	}
}
// TestBuildBreakdown_FloodNoHops verifies that a zero-hop packet produces no
// "Path" range and that the payload begins immediately after the path byte.
func TestBuildBreakdown_FloodNoHops(t *testing.T) {
	// Header 0x15: FLOOD/GRP_TXT; PathByte 0x00: 0 hops; Payload: 00AABB
	// (payload spans bytes 2-4 — the 00 after the path byte is payload data)
	b := BuildBreakdown("150000AABB")
	assertRange(t, b.Ranges, "Header", 0, 0)
	assertRange(t, b.Ranges, "Path Length", 1, 1)
	// No Path range since hashCount=0
	for _, r := range b.Ranges {
		if r.Label == "Path" {
			t.Error("expected no Path range for zero-hop packet")
		}
	}
	assertRange(t, b.Ranges, "Payload", 2, 4)
}
// An ADVERT payload of at least 100 bytes is broken into named sub-fields
// (PubKey/Timestamp/Signature/Flags) instead of one opaque Payload range.
func TestBuildBreakdown_AdvertBasic(t *testing.T) {
	// 0x11 = FLOOD/ADVERT; path byte 0x01 with one 1-byte hop AA; payload is
	// PubKey(32) + Timestamp(4) + Signature(64) + Flags 0x02 (no optional fields).
	// Local renamed from `hex` to avoid shadowing the encoding/hex package name.
	pkt := "1101AA" + repeatHex("AB", 32) + "00000000" + repeatHex("CD", 64) + "02"
	b := BuildBreakdown(pkt)
	assertRange(t, b.Ranges, "Header", 0, 0)
	assertRange(t, b.Ranges, "Path Length", 1, 1)
	assertRange(t, b.Ranges, "Path", 2, 2)
	assertRange(t, b.Ranges, "PubKey", 3, 34)
	assertRange(t, b.Ranges, "Timestamp", 35, 38)
	assertRange(t, b.Ranges, "Signature", 39, 102)
	assertRange(t, b.Ranges, "Flags", 103, 103)
}
// Flags bit 0x10 (hasLocation) adds 4-byte Latitude and Longitude fields
// immediately after the flags byte.
func TestBuildBreakdown_AdvertWithLocation(t *testing.T) {
	pkt := "1101AA" + repeatHex("00", 32) + "00000000" + repeatHex("00", 64) +
		"12" + // flags: 0x10 = hasLocation
		"00000000" + "00000000" // latitude, longitude
	b := BuildBreakdown(pkt)
	assertRange(t, b.Ranges, "Latitude", 104, 107)
	assertRange(t, b.Ranges, "Longitude", 108, 111)
}
// Flags bit 0x80 (hasName) labels the remainder of the packet after the
// flags byte as the node Name.
func TestBuildBreakdown_AdvertWithName(t *testing.T) {
	pkt := "1101AA" + repeatHex("00", 32) + "00000000" + repeatHex("00", 64) +
		"82" + // flags: 0x80 = hasName
		"4E6F6465" // "Node" in hex
	b := BuildBreakdown(pkt)
	assertRange(t, b.Ranges, "Name", 104, 107)
}
// helpers

// rangeLabels extracts the Label of every range, preserving order.
func rangeLabels(ranges []HexRange) []string {
	labels := make([]string, 0, len(ranges))
	for _, r := range ranges {
		labels = append(labels, r.Label)
	}
	return labels
}
// equalLabels reports whether two label slices have identical length, order,
// and content.
func equalLabels(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
// assertRange fails the test unless a range with the given label exists and
// spans exactly [wantStart, wantEnd]. Only the first matching label is
// checked; absence of the label is itself a failure.
func assertRange(t *testing.T, ranges []HexRange, label string, wantStart, wantEnd int) {
	t.Helper()
	for _, r := range ranges {
		if r.Label != label {
			continue
		}
		if r.Start != wantStart || r.End != wantEnd {
			t.Errorf("range %q: want [%d,%d], got [%d,%d]", label, wantStart, wantEnd, r.Start, r.End)
		}
		return
	}
	t.Errorf("range %q not found in %v", label, rangeLabels(ranges))
}
func TestZeroHopDirectHashSize(t *testing.T) {
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
+10 -35
View File
@@ -247,11 +247,6 @@ func TestEvictStale_CleansNodeIndexes(t *testing.T) {
func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
now := time.Now().UTC()
// Create a temp DB for on-demand SQL fetch during eviction
db := setupTestDB(t)
defer db.Close()
store := &PacketStore{
packets: make([]*StoreTx, 0),
byHash: make(map[string]*StoreTx),
@@ -272,33 +267,25 @@ func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
subpathCache: make(map[string]*cachedResult),
rfCacheTTL: 15 * time.Second,
retentionHours: 24,
db: db,
useResolvedPathIndex: true,
}
store.initResolvedPathIndex()
// Create a packet indexed via resolved_path pubkeys
// Create a packet indexed only via resolved_path (no decoded JSON pubkeys)
relayPK := "relay0001abcdef"
txID := 1
obsID := 100
tx := &StoreTx{
ID: txID,
ID: 1,
Hash: "hash_rp_001",
FirstSeen: now.Add(-48 * time.Hour).UTC().Format(time.RFC3339),
}
rpPtr := &relayPK
obs := &StoreObs{
ID: obsID,
TransmissionID: txID,
ID: 100,
TransmissionID: 1,
ObserverID: "obs0",
Timestamp: tx.FirstSeen,
ResolvedPath: []*string{rpPtr},
}
tx.Observations = append(tx.Observations, obs)
// Insert into DB so on-demand SQL fetch works during eviction
db.conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen) VALUES (?, '', ?, ?)",
txID, tx.Hash, tx.FirstSeen)
db.conn.Exec("INSERT INTO observations (id, transmission_id, observer_idx, path_json, timestamp, resolved_path) VALUES (?, ?, 1, ?, ?, ?)",
obsID, txID, `["aa"]`, now.Add(-48*time.Hour).Unix(), `["`+relayPK+`"]`)
tx.ResolvedPath = []*string{rpPtr}
store.packets = append(store.packets, tx)
store.byHash[tx.Hash] = tx
@@ -306,9 +293,8 @@ func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
store.byObsID[obs.ID] = obs
store.byObserver["obs0"] = append(store.byObserver["obs0"], obs)
// Index relay via decode-window simulation
store.addToByNode(tx, relayPK)
store.addToResolvedPubkeyIndex(txID, []string{relayPK})
// Index via resolved_path
store.indexByNode(tx)
// Verify indexed
if len(store.byNode[relayPK]) != 1 {
@@ -318,7 +304,7 @@ func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
t.Fatalf("expected nodeHashes[%s] to contain %s", relayPK, tx.Hash)
}
evicted := store.RunEviction()
evicted := store.EvictStale()
if evicted != 1 {
t.Fatalf("expected 1 evicted, got %d", evicted)
}
@@ -330,14 +316,6 @@ func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
if _, exists := store.nodeHashes[relayPK]; exists {
t.Fatalf("expected nodeHashes[%s] to be deleted after eviction", relayPK)
}
// Verify resolved pubkey index is cleaned up
h := resolvedPubkeyHash(relayPK)
if len(store.resolvedPubkeyIndex[h]) != 0 {
t.Fatalf("expected resolvedPubkeyIndex to be empty after eviction")
}
if _, exists := store.resolvedPubkeyReverse[txID]; exists {
t.Fatalf("expected resolvedPubkeyReverse to be empty after eviction")
}
}
func TestEvictStale_RunEvictionThreadSafe(t *testing.T) {
@@ -568,9 +546,6 @@ func TestEstimateStoreTxBytes(t *testing.T) {
manualCalc := int64(storeTxBaseBytes) + int64(len(tx.RawHex)+len(tx.Hash)+len(tx.DecodedJSON)+len(tx.PathJSON)) + int64(numIndexesPerTx*indexEntryBytes)
manualCalc += perTxMapsBytes
manualCalc += hops * perPathHopBytes
if hops > 1 {
manualCalc += (hops * (hops - 1) / 2) * perSubpathEntryBytes
}
if est != manualCalc {
t.Fatalf("estimateStoreTxBytes = %d, want %d (manual calc)", est, manualCalc)
}
-4
View File
@@ -14,10 +14,6 @@ replace github.com/meshcore-analyzer/geofilter => ../../internal/geofilter
replace github.com/meshcore-analyzer/sigvalidate => ../../internal/sigvalidate
require github.com/meshcore-analyzer/packetpath v0.0.0
replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect
-107
View File
@@ -1,107 +0,0 @@
package main
import (
"encoding/json"
"testing"
"time"
_ "modernc.org/sqlite"
)
const issue673NodePK = "7502f19f44cad6d7b626e1d811c00a914af452636182ccded3fd019803395ec9"
// setupIssue673Store builds an in-memory store with one repeater node having:
// - one ADVERT packet (legitimately indexed in byNode)
// - one GRP_TXT packet whose decoded text contains the node's pubkey (false-positive candidate)
func setupIssue673Store(t *testing.T) (*PacketStore, *DB) {
t.Helper()
db := setupTestDB(t)
_, err := db.conn.Exec(
"INSERT INTO nodes (public_key, name, role) VALUES (?, ?, ?)",
issue673NodePK, "Quail Hollow Park", "repeater",
)
if err != nil {
t.Fatal(err)
}
ps := NewPacketStore(db, nil)
now := time.Now().UTC().Format(time.RFC3339)
pt4 := 4 // ADVERT
pt5 := 5 // GRP_TXT
advertDecoded, _ := json.Marshal(map[string]interface{}{"pubKey": issue673NodePK})
advert := &StoreTx{
ID: 1,
Hash: "advert_hash_673",
PayloadType: &pt4,
DecodedJSON: string(advertDecoded),
FirstSeen: now,
}
otherPK := "aabbccddaabbccddaabbccddaabbccddaabbccddaabbccddaabbccddaabbccdd"
chatDecoded, _ := json.Marshal(map[string]interface{}{
"srcPubKey": otherPK,
"text": "Check out node " + issue673NodePK + " on the analyzer",
})
chat := &StoreTx{
ID: 2,
Hash: "chat_hash_673",
PayloadType: &pt5,
DecodedJSON: string(chatDecoded),
FirstSeen: now,
}
ps.mu.Lock()
ps.packets = append(ps.packets, advert, chat)
ps.byHash[advert.Hash] = advert
ps.byHash[chat.Hash] = chat
ps.byTxID[advert.ID] = advert
ps.byTxID[chat.ID] = chat
ps.byNode[issue673NodePK] = []*StoreTx{advert}
ps.mu.Unlock()
return ps, db
}
// TestGetNodeAnalytics_ExcludesGRPTXTWithPubkeyInText verifies that a GRP_TXT packet
// whose message text contains a node's pubkey is not counted in that node's analytics.
func TestGetNodeAnalytics_ExcludesGRPTXTWithPubkeyInText(t *testing.T) {
ps, db := setupIssue673Store(t)
defer db.Close()
analytics, err := ps.GetNodeAnalytics(issue673NodePK, 30)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if analytics == nil {
t.Fatal("expected analytics, got nil")
}
for _, ptc := range analytics.PacketTypeBreakdown {
if ptc.PayloadType == 5 {
t.Errorf("GRP_TXT (type 5) should not appear in analytics for repeater node, got count=%d", ptc.Count)
}
}
}
// TestFilterPackets_NodeQueryDoesNotMatchChatText verifies that the slow path of
// filterPackets (node filter combined with Since) does not return a GRP_TXT packet
// whose pubkey appears only in message text, not in a structured pubkey field.
func TestFilterPackets_NodeQueryDoesNotMatchChatText(t *testing.T) {
ps, db := setupIssue673Store(t)
defer db.Close()
yesterday := time.Now().Add(-24 * time.Hour).UTC().Format(time.RFC3339)
result := ps.QueryPackets(PacketQuery{Node: issue673NodePK, Since: yesterday, Limit: 50})
if result.Total != 1 {
t.Errorf("expected 1 packet for node (ADVERT only), got %d", result.Total)
}
for _, pkt := range result.Packets {
if pkt["hash"] == "chat_hash_673" {
t.Errorf("GRP_TXT with pubkey in message text was incorrectly returned for node query")
}
}
}
-78
View File
@@ -1,78 +0,0 @@
package main
import (
"encoding/json"
"net/http/httptest"
"testing"
"time"
"github.com/gorilla/mux"
)
// TestRepro810 reproduces #810: when the longest-path observation has NULL
// resolved_path but a shorter-path observation has one, fetchResolvedPathForTxBest
// returns nil → /api/nodes/{pk}/health.recentPackets[].resolved_path is missing
// while /api/packets shows it.
func TestRepro810(t *testing.T) {
db := setupTestDB(t)
now := time.Now().UTC()
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
recentEpoch := now.Add(-1 * time.Hour).Unix()
db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen, packet_count) VALUES ('obs1','O1',?, '2026-01-01T00:00:00Z', 100)`, recent)
db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen, packet_count) VALUES ('obs2','O2',?, '2026-01-01T00:00:00Z', 100)`, recent)
db.conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen, first_seen, advert_count) VALUES ('aabbccdd11223344','R','repeater',?, '2026-01-01T00:00:00Z', 1)`, recent)
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json) VALUES ('AABB','testhash00000001',?,1,4,'{"pubKey":"aabbccdd11223344","type":"ADVERT"}')`, recent)
// Longest-path obs WITHOUT resolved_path
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp) VALUES (1,1,12.5,-90,'["aa","bb","cc"]',?)`, recentEpoch)
// Shorter-path obs WITH resolved_path
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path) VALUES (1,2,8.0,-95,'["aa","bb"]',?,'["aabbccdd11223344","eeff00112233aabb"]')`, recentEpoch-100)
cfg := &Config{Port: 3000}
hub := NewHub()
srv := NewServer(db, cfg, hub)
store := NewPacketStore(db, nil)
if err := store.Load(); err != nil {
t.Fatal(err)
}
srv.store = store
router := mux.NewRouter()
srv.RegisterRoutes(router)
// Sanity: /api/packets should show resolved_path for this tx.
reqP := httptest.NewRequest("GET", "/api/packets?limit=10", nil)
wP := httptest.NewRecorder()
router.ServeHTTP(wP, reqP)
var pktsBody map[string]interface{}
json.Unmarshal(wP.Body.Bytes(), &pktsBody)
pkts, _ := pktsBody["packets"].([]interface{})
hasOnPackets := false
for _, p := range pkts {
pm := p.(map[string]interface{})
if pm["hash"] == "testhash00000001" && pm["resolved_path"] != nil {
hasOnPackets = true
}
}
if !hasOnPackets {
t.Fatal("precondition: /api/packets must report resolved_path for tx")
}
req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/health", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
var body map[string]interface{}
json.Unmarshal(w.Body.Bytes(), &body)
rp, _ := body["recentPackets"].([]interface{})
if len(rp) == 0 {
t.Fatal("no recentPackets")
}
for _, p := range rp {
pm := p.(map[string]interface{})
if pm["hash"] == "testhash00000001" {
if pm["resolved_path"] == nil {
t.Fatal("BUG #810: /health.recentPackets resolved_path is nil despite /api/packets reporting it")
}
return
}
}
t.Fatal("tx not found in recentPackets")
}
-132
View File
@@ -1,132 +0,0 @@
package main
import (
"os"
"strconv"
"strings"
"sync"
"time"
)
// MemorySnapshot is a point-in-time view of process memory across several
// vantage points. Values are in MB (1024*1024 bytes), rounded to one decimal.
//
// Field invariants (typical, not guaranteed under exotic conditions):
//
//	processRSSMB >= goSysMB >= goHeapInuseMB >= storeDataMB
//
//   - processRSSMB is what the kernel charges the process (resident set).
//     Read from /proc/self/status `VmRSS:` on Linux; falls back to goSysMB
//     on other platforms or when /proc is unavailable.
//   - goSysMB is the total memory obtained from the OS by the Go runtime
//     (heap, stacks, GC metadata, mspans, mcache, etc.). Includes
//     fragmentation and unused-but-mapped span overhead.
//   - goHeapInuseMB is the live, in-use Go heap (HeapInuse). Excludes
//     idle spans and runtime overhead.
//   - storeDataMB is the in-store packet byte estimate (transmissions +
//     observations). Subset of HeapInuse. Does not include index maps,
//     analytics caches, broadcast queues, or runtime overhead. Used as
//     the input to the eviction watermark.
//
// processRSSMB and storeDataMB are monotonic only relative to ingest +
// eviction; both can shrink when packets age out. goHeapInuseMB and goSysMB
// fluctuate with GC.
//
// cgoBytesMB intentionally absent: this build uses the pure-Go
// modernc.org/sqlite driver, so there is no cgo allocator to measure.
// Reintroduce only if we ever switch back to mattn/go-sqlite3.
type MemorySnapshot struct {
	ProcessRSSMB  float64 `json:"processRSSMB"`  // kernel RSS (VmRSS), or goSysMB fallback off-Linux
	GoHeapInuseMB float64 `json:"goHeapInuseMB"` // runtime.MemStats.HeapInuse
	GoSysMB       float64 `json:"goSysMB"`       // runtime.MemStats.Sys
	StoreDataMB   float64 `json:"storeDataMB"`   // caller-supplied packet-store estimate
}
// rssCache rate-limits the /proc/self/status read. Go memory stats are
// already cached by Server.getMemStats (5s TTL). We use a tighter 1s TTL
// here so processRSSMB stays reasonably fresh during ops debugging
// without paying the syscall cost on every /api/stats hit.
//
// All three variables are guarded by rssCacheMu; readers copy the value
// out while holding the lock (see getMemorySnapshot).
var (
	rssCacheMu       sync.Mutex
	rssCacheValueMB  float64   // last RSS reading in MB; 0 means "unavailable"
	rssCacheCachedAt time.Time // when rssCacheValueMB was last refreshed
)

// rssCacheTTL bounds how often /proc/self/status is re-read.
const rssCacheTTL = 1 * time.Second
// getMemorySnapshot composes a MemorySnapshot using the Server's existing
// runtime.MemStats cache (5s TTL, used by /api/health and /api/perf too)
// plus a rate-limited /proc RSS read. storeDataMB is supplied by the
// caller because the packet store is the source of truth.
func (s *Server) getMemorySnapshot(storeDataMB float64) MemorySnapshot {
	stats := s.getMemStats()
	sysMB := float64(stats.Sys) / 1048576.0

	// Refresh the cached RSS reading if it has gone stale, then copy it
	// out while still holding the lock.
	rssCacheMu.Lock()
	if time.Since(rssCacheCachedAt) > rssCacheTTL {
		rssCacheValueMB = readProcRSSMB()
		rssCacheCachedAt = time.Now()
	}
	residentMB := rssCacheValueMB
	rssCacheMu.Unlock()

	// Fallback when /proc is unavailable (non-Linux, sandboxes, etc.).
	// runtime.Sys is an upper bound on Go-attributable memory and a
	// reasonable proxy for pure-Go builds.
	if residentMB <= 0 {
		residentMB = sysMB
	}

	return MemorySnapshot{
		ProcessRSSMB:  roundMB(residentMB),
		GoHeapInuseMB: roundMB(float64(stats.HeapInuse) / 1048576.0),
		GoSysMB:       roundMB(sysMB),
		StoreDataMB:   roundMB(storeDataMB),
	}
}
// readProcRSSMB parses /proc/self/status for the VmRSS line. Returns 0 on
// any failure (file missing, malformed line, parse error) — the caller
// then uses a runtime fallback. Linux only; macOS/Windows return 0.
//
// Safety notes (djb): the file path is hard-coded, no untrusted input is
// concatenated. We bound the read at 8 KiB (the whole status file is
// well under 4 KiB on modern kernels) so a corrupt /proc can't OOM us.
// We only parse digits with strconv; no shell, no exec, no format strings.
func readProcRSSMB() float64 {
	const maxStatusBytes = 8 * 1024
	f, err := os.Open("/proc/self/status")
	if err != nil {
		return 0
	}
	defer f.Close()

	// Read until the buffer is full or EOF. A single Read call is not
	// guaranteed to return the whole file — short reads are legal,
	// especially on procfs — and a short read could truncate the
	// buffer before the VmRSS line.
	buf := make([]byte, maxStatusBytes)
	n := 0
	for n < maxStatusBytes {
		m, rerr := f.Read(buf[n:])
		n += m
		if rerr != nil {
			break // EOF or I/O error: parse whatever we got
		}
	}
	if n == 0 {
		return 0
	}

	for _, line := range strings.Split(string(buf[:n]), "\n") {
		if !strings.HasPrefix(line, "VmRSS:") {
			continue
		}
		// Format: "VmRSS:\t 123456 kB"
		fields := strings.Fields(line[len("VmRSS:"):])
		if len(fields) < 2 {
			return 0
		}
		kb, perr := strconv.ParseFloat(fields[0], 64)
		if perr != nil || kb < 0 {
			return 0
		}
		// Unit is kB per kernel convention; convert to MB.
		return kb / 1024.0
	}
	return 0
}
// roundMB rounds a megabyte value to one decimal place; negative inputs
// clamp to 0 (they can only arise from a failed measurement).
func roundMB(v float64) float64 {
	if v < 0 {
		return 0
	}
	tenths := int64(v*10 + 0.5) // round-half-up in tenths of a MB
	return float64(tenths) / 10.0
}
+18 -18
View File
@@ -12,9 +12,9 @@ import (
func TestResolveAmbiguousEdges_GeoProximity(t *testing.T) {
// Node A at lat=45, lon=-122. Candidate B1 at lat=45.1, lon=-122.1 (close).
// Candidate B2 at lat=10, lon=10 (far away). Prefix "b0" matches both.
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB1 := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "CloseNode", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffff", Name: "FarNode", HasGPS: true, Lat: 10.0, Lon: 10.0}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB1 := nodeInfo{PublicKey: "b0b1eeee", Name: "CloseNode", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeB2 := nodeInfo{PublicKey: "b0c2ffff", Name: "FarNode", HasGPS: true, Lat: 10.0, Lon: 10.0}
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB1, nodeB2})
@@ -62,8 +62,8 @@ func TestResolveAmbiguousEdges_GeoProximity(t *testing.T) {
// Test 2: Ambiguous edge merged with existing resolved edge (count accumulation).
func TestResolveAmbiguousEdges_MergeWithExisting(t *testing.T) {
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB})
@@ -133,9 +133,9 @@ func TestResolveAmbiguousEdges_MergeWithExisting(t *testing.T) {
// Test 3: Ambiguous edge left as-is when resolution fails.
func TestResolveAmbiguousEdges_FailsNoChange(t *testing.T) {
// Two candidates, neither has GPS, no affinity data — resolution falls through.
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"}
nodeB1 := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "B1"}
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffff", Name: "B2"}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA"}
nodeB1 := nodeInfo{PublicKey: "b0b1eeee", Name: "B1"}
nodeB2 := nodeInfo{PublicKey: "b0c2ffff", Name: "B2"}
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB1, nodeB2})
@@ -175,7 +175,7 @@ func TestResolveAmbiguousEdges_FailsNoChange(t *testing.T) {
// Test 3 (corrected): Resolution fails when prefix has no candidates in prefix map.
func TestResolveAmbiguousEdges_NoMatch(t *testing.T) {
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA"}
// pm has no entries matching prefix "zz"
pm := buildPrefixMap([]nodeInfo{nodeA})
@@ -215,8 +215,8 @@ func TestResolveAmbiguousEdges_NoMatch(t *testing.T) {
// Test 6: Phase 1 edge collection unchanged (no regression).
func TestPhase1EdgeCollection_Unchanged(t *testing.T) {
// Build a simple graph and verify non-ambiguous edges are not touched.
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{Role: "repeater", PublicKey: "bbbb2222", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{PublicKey: "bbbb2222", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
ts := time.Now().UTC().Format(time.RFC3339)
payloadType := 4
@@ -232,7 +232,7 @@ func TestPhase1EdgeCollection_Unchanged(t *testing.T) {
Observations: obs,
}
store := ngTestStore([]nodeInfo{nodeA, nodeB, {Role: "repeater", PublicKey: "cccc3333", Name: "Observer"}}, []*StoreTx{tx})
store := ngTestStore([]nodeInfo{nodeA, nodeB, {PublicKey: "cccc3333", Name: "Observer"}}, []*StoreTx{tx})
graph := BuildFromStore(store)
edges := graph.Neighbors("aaaa1111")
@@ -255,8 +255,8 @@ func TestPhase1EdgeCollection_Unchanged(t *testing.T) {
// Test 7: Merge preserves higher LastSeen timestamp.
func TestResolveAmbiguousEdges_PreservesHigherLastSeen(t *testing.T) {
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeA := nodeInfo{PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB})
graph := NewNeighborGraph()
@@ -307,10 +307,10 @@ func TestResolveAmbiguousEdges_PreservesHigherLastSeen(t *testing.T) {
// Test 5: Integration — node with both 1-byte and 2-byte prefix observations shows single entry.
func TestIntegration_DualPrefixSingleNeighbor(t *testing.T) {
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeeeb0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffffb0c2ffff", Name: "NodeB2", HasGPS: true, Lat: 10.0, Lon: 10.0}
observer := nodeInfo{Role: "repeater", PublicKey: "cccc3333cccc3333", Name: "Observer"}
nodeA := nodeInfo{PublicKey: "aaaa1111aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
nodeB := nodeInfo{PublicKey: "b0b1eeeeb0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
nodeB2 := nodeInfo{PublicKey: "b0c2ffffb0c2ffff", Name: "NodeB2", HasGPS: true, Lat: 10.0, Lon: 10.0}
observer := nodeInfo{PublicKey: "cccc3333cccc3333", Name: "Observer"}
ts := time.Now().UTC().Format(time.RFC3339)
pt := 4
+63 -63
View File
@@ -86,9 +86,9 @@ func TestBuildNeighborGraph_EmptyStore(t *testing.T) {
func TestBuildNeighborGraph_AdvertSingleHopPath(t *testing.T) {
// ADVERT from X, path=["R1_prefix"] → edges: X↔R1 and Observer↔R1
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["r1aa"]`, nowStr, ngFloatPtr(-10)),
@@ -132,10 +132,10 @@ func TestBuildNeighborGraph_AdvertSingleHopPath(t *testing.T) {
func TestBuildNeighborGraph_AdvertMultiHopPath(t *testing.T) {
// ADVERT from X, path=["R1","R2"] → X↔R1 and Observer↔R2
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "r2ddeeff", Name: "R2"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
@@ -170,8 +170,8 @@ func TestBuildNeighborGraph_AdvertMultiHopPath(t *testing.T) {
func TestBuildNeighborGraph_AdvertZeroHop(t *testing.T) {
// ADVERT from X, path=[] → X↔Observer direct edge
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `[]`, nowStr, nil),
@@ -195,8 +195,8 @@ func TestBuildNeighborGraph_AdvertZeroHop(t *testing.T) {
func TestBuildNeighborGraph_NonAdvertEmptyPath(t *testing.T) {
// Non-ADVERT, path=[] → no edges
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `[]`, nowStr, nil),
@@ -212,10 +212,10 @@ func TestBuildNeighborGraph_NonAdvertEmptyPath(t *testing.T) {
func TestBuildNeighborGraph_NonAdvertOnlyObserverEdge(t *testing.T) {
// Non-ADVERT with path=["R1","R2"] → only Observer↔R2, NO originator edge
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "r2ddeeff", Name: "R2"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
@@ -236,9 +236,9 @@ func TestBuildNeighborGraph_NonAdvertOnlyObserverEdge(t *testing.T) {
func TestBuildNeighborGraph_NonAdvertSingleHop(t *testing.T) {
// Non-ADVERT with path=["R1"] → Observer↔R1 only
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["r1aa"]`, nowStr, nil),
@@ -259,10 +259,10 @@ func TestBuildNeighborGraph_NonAdvertSingleHop(t *testing.T) {
func TestBuildNeighborGraph_HashCollision(t *testing.T) {
// Two nodes share prefix "a3" → ambiguous edge
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "a3bb1111", Name: "CandidateA"},
{Role: "repeater", PublicKey: "a3bb2222", Name: "CandidateB"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "a3bb1111", Name: "CandidateA"},
{PublicKey: "a3bb2222", Name: "CandidateB"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["a3bb"]`, nowStr, nil),
@@ -308,13 +308,13 @@ func TestBuildNeighborGraph_ConfidenceAutoResolve(t *testing.T) {
// CandidateB has no known neighbors (Jaccard = 0).
// An ambiguous edge X↔prefix "a3" with candidates [A, B] should auto-resolve to A.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "n1111111", Name: "N1"},
{Role: "repeater", PublicKey: "n2222222", Name: "N2"},
{Role: "repeater", PublicKey: "n3333333", Name: "N3"},
{Role: "repeater", PublicKey: "a3001111", Name: "CandidateA"},
{Role: "repeater", PublicKey: "a3002222", Name: "CandidateB"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "n1111111", Name: "N1"},
{PublicKey: "n2222222", Name: "N2"},
{PublicKey: "n3333333", Name: "N3"},
{PublicKey: "a3001111", Name: "CandidateA"},
{PublicKey: "a3002222", Name: "CandidateB"},
{PublicKey: "obs00001", Name: "Observer"},
}
// Create resolved edges: X↔N1, X↔N2, X↔N3, A↔N1, A↔N2, A↔N3
@@ -373,11 +373,11 @@ func TestBuildNeighborGraph_ConfidenceAutoResolve(t *testing.T) {
func TestBuildNeighborGraph_EqualScoresAmbiguous(t *testing.T) {
// Two candidates with identical neighbor sets → should NOT auto-resolve.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "n1111111", Name: "N1"},
{Role: "repeater", PublicKey: "a3001111", Name: "CandidateA"},
{Role: "repeater", PublicKey: "a3002222", Name: "CandidateB"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "n1111111", Name: "N1"},
{PublicKey: "a3001111", Name: "CandidateA"},
{PublicKey: "a3002222", Name: "CandidateB"},
{PublicKey: "obs00001", Name: "Observer"},
}
var txs []*StoreTx
@@ -425,8 +425,8 @@ func TestBuildNeighborGraph_EqualScoresAmbiguous(t *testing.T) {
func TestBuildNeighborGraph_ObserverSelfEdgeGuard(t *testing.T) {
// Observer's own prefix in path → should NOT create self-edge.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["obs0"]`, nowStr, nil),
@@ -445,8 +445,8 @@ func TestBuildNeighborGraph_ObserverSelfEdgeGuard(t *testing.T) {
func TestBuildNeighborGraph_OrphanPrefix(t *testing.T) {
// Path contains prefix matching zero nodes → edge recorded as unresolved.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["ff99"]`, nowStr, nil),
@@ -506,9 +506,9 @@ func TestAffinityScore_StaleAndLow(t *testing.T) {
func TestBuildNeighborGraph_CountAccumulation(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "obs00001", Name: "Observer"},
}
var txs []*StoreTx
@@ -535,10 +535,10 @@ func TestBuildNeighborGraph_CountAccumulation(t *testing.T) {
func TestBuildNeighborGraph_MultipleObservers(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "obs00001", Name: "Obs1"},
{Role: "repeater", PublicKey: "obs00002", Name: "Obs2"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "obs00001", Name: "Obs1"},
{PublicKey: "obs00002", Name: "Obs2"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
@@ -565,9 +565,9 @@ func TestBuildNeighborGraph_MultipleObservers(t *testing.T) {
func TestBuildNeighborGraph_TimeDecayOldObservations(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
@@ -592,10 +592,10 @@ func TestBuildNeighborGraph_TimeDecayOldObservations(t *testing.T) {
func TestBuildNeighborGraph_ADVERTOnlyConstraint(t *testing.T) {
// Non-ADVERT: should NOT create originator↔path[0] edge, only observer↔path[last].
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
{PublicKey: "aaaa1111", Name: "NodeX"},
{PublicKey: "r1aabbcc", Name: "R1"},
{PublicKey: "r2ddeeff", Name: "R2"},
{PublicKey: "obs00001", Name: "Observer"},
}
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
@@ -631,9 +631,9 @@ func ngPubKeyJSON(pubkey string) string {
func TestBuildNeighborGraph_AdvertPubKeyField(t *testing.T) {
// Real ADVERTs use "pubKey", not "from_node". Verify the builder handles it.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234", Name: "Originator"},
{Role: "repeater", PublicKey: "r1aabbccdd001122334455667788990011223344556677889900112233445566", Name: "R1"},
{Role: "repeater", PublicKey: "obs0000100112233445566778899001122334455667788990011223344556677", Name: "Observer"},
{PublicKey: "99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234", Name: "Originator"},
{PublicKey: "r1aabbccdd001122334455667788990011223344556677889900112233445566", Name: "R1"},
{PublicKey: "obs0000100112233445566778899001122334455667788990011223344556677", Name: "Observer"},
}
tx := ngMakeTx(1, 4, ngPubKeyJSON("99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234"), []*StoreObs{
ngMakeObs("obs0000100112233445566778899001122334455667788990011223344556677", `["r1"]`, nowStr, ngFloatPtr(-8.5)),
@@ -666,10 +666,10 @@ func TestBuildNeighborGraph_OneByteHashPrefixes(t *testing.T) {
// Real-world scenario: 1-byte hash prefixes with multiple candidates.
// Should create edges (possibly ambiguous) rather than empty graph.
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "c0dedad400000000000000000000000000000000000000000000000000000001", Name: "NodeC0-1"},
{Role: "repeater", PublicKey: "c0dedad900000000000000000000000000000000000000000000000000000002", Name: "NodeC0-2"},
{Role: "repeater", PublicKey: "a3bbccdd00000000000000000000000000000000000000000000000000000003", Name: "Originator"},
{Role: "repeater", PublicKey: "obs1234500000000000000000000000000000000000000000000000000000004", Name: "Observer"},
{PublicKey: "c0dedad400000000000000000000000000000000000000000000000000000001", Name: "NodeC0-1"},
{PublicKey: "c0dedad900000000000000000000000000000000000000000000000000000002", Name: "NodeC0-2"},
{PublicKey: "a3bbccdd00000000000000000000000000000000000000000000000000000003", Name: "Originator"},
{PublicKey: "obs1234500000000000000000000000000000000000000000000000000000004", Name: "Observer"},
}
// ADVERT from Originator with 1-byte path hop "c0"
tx := ngMakeTx(1, 4, ngPubKeyJSON("a3bbccdd00000000000000000000000000000000000000000000000000000003"), []*StoreObs{
@@ -809,10 +809,10 @@ func TestExtractFromNode_UsesCachedParse(t *testing.T) {
func BenchmarkBuildFromStore(b *testing.B) {
// Simulate a dataset with many packets and repeated pubkeys
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"},
{Role: "repeater", PublicKey: "bbbb2222", Name: "NodeB"},
{Role: "repeater", PublicKey: "cccc3333", Name: "NodeC"},
{Role: "repeater", PublicKey: "dddd4444", Name: "NodeD"},
{PublicKey: "aaaa1111", Name: "NodeA"},
{PublicKey: "bbbb2222", Name: "NodeB"},
{PublicKey: "cccc3333", Name: "NodeC"},
{PublicKey: "dddd4444", Name: "NodeD"},
}
const numPackets = 1000
packets := make([]*StoreTx, 0, numPackets)
+9 -52
View File
@@ -381,13 +381,7 @@ func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int
}
}
for _, obs := range tx.Observations {
// Check if this observation has been resolved: look up in the index.
// If the tx has no reverse-map entries AND path is non-empty, it needs backfill.
hasRP := false
if _, ok := store.resolvedPubkeyReverse[tx.ID]; ok {
hasRP = true
}
if !hasRP && obs.PathJSON != "" && obs.PathJSON != "[]" {
if obs.ResolvedPath == nil && obs.PathJSON != "" && obs.PathJSON != "[]" {
allPending = append(allPending, obsRef{
obsID: obs.ID,
pathJSON: obs.PathJSON,
@@ -488,61 +482,24 @@ func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int
}
}
// Update in-memory state: update resolved pubkey index, re-pick best observation,
// and invalidate LRU cache entries for backfilled observations (#800).
//
// Lock ordering: always take s.mu BEFORE lruMu. The read path
// (fetchResolvedPathForObs) takes lruMu independently of s.mu,
// so we must NOT hold s.mu while taking lruMu. Instead, collect
// obsIDs to invalidate under s.mu, release it, then take lruMu.
// Update in-memory state and re-pick best observation under a single
// write lock. The per-tx pickBestObservation is O(observations) which is
// typically <10 per tx — negligible cost vs. the race risk of splitting
// the lock (pollAndMerge can append to tx.Observations concurrently).
store.mu.Lock()
affectedSet := make(map[string]bool)
lruInvalidate := make([]int, 0, len(results))
for _, r := range results {
// Remove old index entries for this tx, then re-add with new pubkeys
if obs, ok := store.byObsID[r.obsID]; ok {
obs.ResolvedPath = r.rp
}
if !affectedSet[r.txHash] {
affectedSet[r.txHash] = true
if tx, ok := store.byHash[r.txHash]; ok {
store.removeFromResolvedPubkeyIndex(tx.ID)
pickBestObservation(tx)
}
}
// Add new resolved pubkeys to index
if tx, ok := store.byHash[r.txHash]; ok {
pks := extractResolvedPubkeys(r.rp)
store.addToResolvedPubkeyIndex(tx.ID, pks)
// Update byNode for relay nodes
for _, pk := range pks {
store.addToByNode(tx, pk)
}
// Update byPathHop resolved-key entries
hopsSeen := make(map[string]bool)
for _, hop := range txGetParsedPath(tx) {
hopsSeen[strings.ToLower(hop)] = true
}
for _, pk := range pks {
if !hopsSeen[pk] {
hopsSeen[pk] = true
store.byPathHop[pk] = append(store.byPathHop[pk], tx)
}
}
}
lruInvalidate = append(lruInvalidate, r.obsID)
}
// Re-pick best observation for affected transmissions
for txHash := range affectedSet {
if tx, ok := store.byHash[txHash]; ok {
pickBestObservation(tx)
}
}
store.mu.Unlock()
// Invalidate LRU entries AFTER releasing s.mu to maintain lock
// ordering (lruMu must never be taken while s.mu is held).
store.lruMu.Lock()
for _, obsID := range lruInvalidate {
store.lruDelete(obsID)
}
store.lruMu.Unlock()
}
totalProcessed += len(chunk)
+57 -35
View File
@@ -38,7 +38,7 @@ func createTestDBWithSchema(t *testing.T) (*DB, string) {
observer_id TEXT, observer_name TEXT, direction TEXT,
snr REAL, rssi REAL, score INTEGER,
path_json TEXT, timestamp TEXT,
resolved_path TEXT, raw_hex TEXT
resolved_path TEXT
)`)
conn.Exec(`CREATE TABLE nodes (
public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
@@ -58,8 +58,8 @@ func createTestDBWithSchema(t *testing.T) (*DB, string) {
func TestResolvePathForObs(t *testing.T) {
// Build a prefix map with known nodes
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
{Role: "repeater", PublicKey: "bbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-BB"},
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
{PublicKey: "bbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-BB"},
}
pm := buildPrefixMap(nodes)
graph := NewNeighborGraph()
@@ -97,7 +97,7 @@ func TestResolvePathForObs_EmptyPath(t *testing.T) {
func TestResolvePathForObs_Unresolvable(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
}
pm := buildPrefixMap(nodes)
@@ -203,14 +203,14 @@ func TestLoadNeighborEdgesFromDB(t *testing.T) {
}
func TestStoreObsResolvedPathInBroadcast(t *testing.T) {
// After #800 refactor, resolved_path is no longer stored on StoreTx/StoreObs structs.
// Broadcast maps carry resolved_path from the decode-window, not from struct fields.
// This test verifies pickBestObservation no longer sets ResolvedPath on tx.
// Verify resolved_path appears in broadcast maps
pk := "aabbccdd"
obs := &StoreObs{
ID: 1,
ObserverID: "obs1",
ObserverName: "Observer 1",
PathJSON: `["aa"]`,
ResolvedPath: []*string{&pk},
Timestamp: "2024-01-01T00:00:00Z",
}
@@ -221,26 +221,32 @@ func TestStoreObsResolvedPathInBroadcast(t *testing.T) {
}
pickBestObservation(tx)
// tx should NOT have a ResolvedPath field anymore (compile-time guard)
// Verify the best observation's fields are propagated correctly
if tx.ObserverID != "obs1" {
t.Errorf("expected ObserverID=obs1, got %s", tx.ObserverID)
if tx.ResolvedPath == nil {
t.Fatal("expected ResolvedPath to be set on tx after pickBestObservation")
}
if *tx.ResolvedPath[0] != "aabbccdd" {
t.Errorf("expected resolved path to be aabbccdd, got %s", *tx.ResolvedPath[0])
}
}
func TestResolvedPathInTxToMap(t *testing.T) {
// After #800, txToMap no longer includes resolved_path from the struct.
// resolved_path is only available via on-demand SQL fetch (txToMapWithRP).
pk := "aabbccdd"
tx := &StoreTx{
ID: 1,
Hash: "abc123",
PathJSON: `["aa"]`,
obsKeys: make(map[string]bool),
ID: 1,
Hash: "abc123",
PathJSON: `["aa"]`,
ResolvedPath: []*string{&pk},
obsKeys: make(map[string]bool),
}
m := txToMap(tx)
if _, ok := m["resolved_path"]; ok {
t.Error("resolved_path should not be in txToMap output (removed in #800)")
rp, ok := m["resolved_path"]
if !ok {
t.Fatal("resolved_path not in txToMap output")
}
rpSlice, ok := rp.([]*string)
if !ok || len(rpSlice) != 1 || *rpSlice[0] != "aabbccdd" {
t.Errorf("unexpected resolved_path: %v", rp)
}
}
@@ -264,7 +270,7 @@ func TestEnsureResolvedPathColumn(t *testing.T) {
conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
conn.Exec(`CREATE TABLE observations (
id INTEGER PRIMARY KEY, transmission_id INTEGER,
observer_id TEXT, path_json TEXT, timestamp TEXT, raw_hex TEXT
observer_id TEXT, path_json TEXT, timestamp TEXT
)`)
conn.Close()
@@ -359,21 +365,27 @@ func TestLoadWithResolvedPath(t *testing.T) {
t.Fatalf("expected 1 observation, got %d", len(tx.Observations))
}
// After #800, ResolvedPath is not stored on StoreObs struct.
// Instead, resolved pubkeys are in the membership index.
_ = tx.Observations[0] // obs exists
h := resolvedPubkeyHash("aabbccdd")
if len(store.resolvedPubkeyIndex[h]) != 1 {
t.Fatal("expected resolved pubkey to be indexed")
obs := tx.Observations[0]
if obs.ResolvedPath == nil {
t.Fatal("expected ResolvedPath to be loaded")
}
if len(obs.ResolvedPath) != 1 || *obs.ResolvedPath[0] != "aabbccdd" {
t.Errorf("unexpected ResolvedPath: %v", obs.ResolvedPath)
}
// Check that pickBestObservation propagated resolved_path to tx
if tx.ResolvedPath == nil || len(tx.ResolvedPath) != 1 {
t.Error("expected ResolvedPath to be propagated to tx")
}
}
func TestResolvedPathInAPIResponse(t *testing.T) {
// After #800, TransmissionResp no longer has ResolvedPath field.
// resolved_path is included dynamically in map-based API responses.
// Test that TransmissionResp properly marshals resolved_path
pk := "aabbccddee"
resp := TransmissionResp{
ID: 1,
Hash: "test",
ID: 1,
Hash: "test",
ResolvedPath: []*string{&pk, nil},
}
data, err := json.Marshal(resp)
@@ -384,9 +396,19 @@ func TestResolvedPathInAPIResponse(t *testing.T) {
var m map[string]interface{}
json.Unmarshal(data, &m)
// resolved_path should NOT be in the marshaled JSON
if _, ok := m["resolved_path"]; ok {
t.Error("resolved_path should not be in TransmissionResp JSON (#800)")
rp, ok := m["resolved_path"]
if !ok {
t.Fatal("resolved_path missing from JSON")
}
rpArr, ok := rp.([]interface{})
if !ok || len(rpArr) != 2 {
t.Fatalf("unexpected resolved_path shape: %v", rp)
}
if rpArr[0] != "aabbccddee" {
t.Errorf("first element wrong: %v", rpArr[0])
}
if rpArr[1] != nil {
t.Errorf("second element should be null: %v", rpArr[1])
}
}
@@ -437,8 +459,8 @@ func TestExtractEdgesFromObs_NonAdvertNoPath(t *testing.T) {
func TestExtractEdgesFromObs_WithPath(t *testing.T) {
nodes := []nodeInfo{
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
{Role: "repeater", PublicKey: "ffgghhii1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-FF"},
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
{PublicKey: "ffgghhii1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-FF"},
}
pm := buildPrefixMap(nodes)
-212
View File
@@ -1,212 +0,0 @@
package main
import (
"encoding/json"
"testing"
)
// TestCanAppearInPath checks the role filter across case variants and
// non-routing roles.
func TestCanAppearInPath(t *testing.T) {
	expectations := map[string]bool{
		"repeater":    true,
		"Repeater":    true,
		"REPEATER":    true,
		"room_server": true,
		"Room_Server": true,
		"room":        true,
		"companion":   false,
		"sensor":      false,
		"":            false,
		"unknown":     false,
	}
	for role, want := range expectations {
		got := canAppearInPath(role)
		if got != want {
			t.Errorf("canAppearInPath(%q) = %v, want %v", role, got, want)
		}
	}
}
// TestBuildPrefixMap_ExcludesCompanions: companion nodes must never be
// indexed as path-hop candidates.
func TestBuildPrefixMap_ExcludesCompanions(t *testing.T) {
	companion := nodeInfo{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"}
	pm := buildPrefixMap([]nodeInfo{companion})
	if entries := len(pm.m); entries != 0 {
		t.Fatalf("expected empty prefix map, got %d entries", entries)
	}
}
// TestBuildPrefixMap_ExcludesSensors: sensor nodes must never be indexed as
// path-hop candidates.
func TestBuildPrefixMap_ExcludesSensors(t *testing.T) {
	sensor := nodeInfo{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"}
	pm := buildPrefixMap([]nodeInfo{sensor})
	if entries := len(pm.m); entries != 0 {
		t.Fatalf("expected empty prefix map, got %d entries", entries)
	}
}
func TestResolveWithContext_NilWhenOnlyCompanionMatchesPrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
}
pm := buildPrefixMap(nodes)
r, _, _ := pm.resolveWithContext("7a", nil, nil)
if r != nil {
t.Fatalf("expected nil, got %+v", r)
}
}
func TestResolveWithContext_NilWhenOnlySensorMatchesPrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"},
}
pm := buildPrefixMap(nodes)
r, _, _ := pm.resolveWithContext("7a", nil, nil)
if r != nil {
t.Fatalf("expected nil for sensor-only prefix, got %+v", r)
}
}
func TestResolveWithContext_PrefersRepeaterOverCompanionAtSamePrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
{PublicKey: "7a5678901234", Role: "repeater", Name: "MyRepeater"},
}
pm := buildPrefixMap(nodes)
r, _, _ := pm.resolveWithContext("7a", nil, nil)
if r == nil {
t.Fatal("expected non-nil result")
}
if r.Name != "MyRepeater" {
t.Fatalf("expected MyRepeater, got %s", r.Name)
}
}
func TestResolveWithContext_PrefersRoomServerOverCompanionAtSamePrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "ab1234abcdef", Role: "companion", Name: "MyCompanion"},
{PublicKey: "ab5678901234", Role: "room_server", Name: "MyRoom"},
}
pm := buildPrefixMap(nodes)
r, _, _ := pm.resolveWithContext("ab", nil, nil)
if r == nil {
t.Fatal("expected non-nil result")
}
if r.Name != "MyRoom" {
t.Fatalf("expected MyRoom, got %s", r.Name)
}
}
func TestResolve_NilWhenOnlyCompanionMatchesPrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
}
pm := buildPrefixMap(nodes)
r := pm.resolve("7a")
if r != nil {
t.Fatalf("expected nil from resolve() for companion-only prefix, got %+v", r)
}
}
func TestResolve_NilWhenOnlySensorMatchesPrefix(t *testing.T) {
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"},
}
pm := buildPrefixMap(nodes)
r := pm.resolve("7a")
if r != nil {
t.Fatalf("expected nil from resolve() for sensor-only prefix, got %+v", r)
}
}
func TestResolveWithContext_PicksRepeaterEvenWhenCompanionHasGPS(t *testing.T) {
// Adversarial: companion has GPS, repeater doesn't. Role filter should
// exclude companion entirely, so repeater wins despite lacking GPS.
nodes := []nodeInfo{
{PublicKey: "7a1234abcdef", Role: "companion", Name: "GPSCompanion", Lat: 37.0, Lon: -122.0, HasGPS: true},
{PublicKey: "7a5678901234", Role: "repeater", Name: "NoGPSRepeater", Lat: 0, Lon: 0, HasGPS: false},
}
pm := buildPrefixMap(nodes)
r, _, _ := pm.resolveWithContext("7a", nil, nil)
if r == nil {
t.Fatal("expected non-nil result")
}
if r.Name != "NoGPSRepeater" {
t.Fatalf("expected NoGPSRepeater (role filter excludes companion), got %s", r.Name)
}
}
// TestComputeDistancesForTx_CompanionNeverInResolvedChain is an end-to-end
// check that prefix resolution inside distance computation never picks a
// companion, even when the companion shares a prefix with a repeater.
func TestComputeDistancesForTx_CompanionNeverInResolvedChain(t *testing.T) {
	// Integration test: a path with a prefix matching both a companion and a
	// repeater. The resolveHop function (using buildPrefixMap) should only
	// return the repeater.
	// All three nodes carry GPS so the path record can be built.
	nodes := []nodeInfo{
		{PublicKey: "7a1234abcdef", Role: "companion", Name: "BadCompanion", Lat: 37.0, Lon: -122.0, HasGPS: true},
		{PublicKey: "7a5678901234", Role: "repeater", Name: "GoodRepeater", Lat: 38.0, Lon: -123.0, HasGPS: true},
		{PublicKey: "bb1111111111", Role: "repeater", Name: "OtherRepeater", Lat: 39.0, Lon: -124.0, HasGPS: true},
	}
	pm := buildPrefixMap(nodes)
	nodeByPk := make(map[string]*nodeInfo)
	for i := range nodes {
		// Index by full pubkey; take the address of the slice element so all
		// lookups share the same backing structs.
		nodeByPk[nodes[i].PublicKey] = &nodes[i]
	}
	repeaterSet := map[string]bool{
		"7a5678901234": true,
		"bb1111111111": true,
	}
	// Build a synthetic StoreTx with a path ["7a", "bb"] and a sender with GPS
	senderPK := "cc0000000000"
	sender := nodeInfo{PublicKey: senderPK, Role: "repeater", Name: "Sender", Lat: 36.0, Lon: -121.0, HasGPS: true}
	nodeByPk[senderPK] = &sender
	pathJSON, _ := json.Marshal([]string{"7a", "bb"})
	decoded, _ := json.Marshal(map[string]interface{}{"pubKey": senderPK})
	tx := &StoreTx{
		PathJSON:    string(pathJSON),
		DecodedJSON: string(decoded),
		FirstSeen:   "2026-04-30T12:00",
	}
	// resolveHop delegates to the role-filtered prefix map under test.
	resolveHop := func(hop string) *nodeInfo {
		return pm.resolve(hop)
	}
	hops, pathRec := computeDistancesForTx(tx, nodeByPk, repeaterSet, resolveHop)
	// Verify BadCompanion's pubkey never appears in hops
	badPK := "7a1234abcdef"
	for i, h := range hops {
		if h.FromPk == badPK || h.ToPk == badPK {
			t.Fatalf("hop[%d] contains BadCompanion pubkey: from=%s to=%s", i, h.FromPk, h.ToPk)
		}
	}
	// Verify BadCompanion's pubkey never appears in pathRec
	if pathRec == nil {
		t.Fatal("expected non-nil path record (3 GPS nodes in chain)")
	}
	for i, hop := range pathRec.Hops {
		if hop.FromPk == badPK || hop.ToPk == badPK {
			t.Fatalf("pathRec.Hops[%d] contains BadCompanion pubkey: from=%s to=%s", i, hop.FromPk, hop.ToPk)
		}
	}
	// Verify GoodRepeater IS in the chain (proves the prefix was resolved to the right node)
	goodPK := "7a5678901234"
	foundGood := false
	for _, hop := range pathRec.Hops {
		if hop.FromPk == goodPK || hop.ToPk == goodPK {
			foundGood = true
			break
		}
	}
	if !foundGood {
		t.Fatal("expected GoodRepeater (7a5678901234) in pathRec.Hops but not found")
	}
}
+29 -29
View File
@@ -11,7 +11,7 @@ import (
func TestResolveWithContext_UniquePrefix(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1b2c3d4", Name: "Node-A", HasGPS: true, Lat: 1, Lon: 2},
{PublicKey: "a1b2c3d4", Name: "Node-A", HasGPS: true, Lat: 1, Lon: 2},
})
ni, confidence, _ := pm.resolveWithContext("a1b2c3d4", nil, nil)
if ni == nil || ni.Name != "Node-A" {
@@ -24,7 +24,7 @@ func TestResolveWithContext_UniquePrefix(t *testing.T) {
func TestResolveWithContext_NoMatch(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1b2c3d4", Name: "Node-A"},
{PublicKey: "a1b2c3d4", Name: "Node-A"},
})
ni, confidence, _ := pm.resolveWithContext("ff", nil, nil)
if ni != nil {
@@ -37,8 +37,8 @@ func TestResolveWithContext_NoMatch(t *testing.T) {
func TestResolveWithContext_AffinityWins(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "Node-A1"},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Node-A2"},
{PublicKey: "a1aaaaaa", Name: "Node-A1"},
{PublicKey: "a1bbbbbb", Name: "Node-A2"},
})
graph := NewNeighborGraph()
@@ -60,9 +60,9 @@ func TestResolveWithContext_AffinityWins(t *testing.T) {
func TestResolveWithContext_AffinityTooClose_FallsToGeo(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "Node-A1", HasGPS: true, Lat: 10, Lon: 20},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Node-A2", HasGPS: true, Lat: 11, Lon: 21},
{Role: "repeater", PublicKey: "c0c0c0c0", Name: "Ctx", HasGPS: true, Lat: 10.1, Lon: 20.1},
{PublicKey: "a1aaaaaa", Name: "Node-A1", HasGPS: true, Lat: 10, Lon: 20},
{PublicKey: "a1bbbbbb", Name: "Node-A2", HasGPS: true, Lat: 11, Lon: 21},
{PublicKey: "c0c0c0c0", Name: "Ctx", HasGPS: true, Lat: 10.1, Lon: 20.1},
})
graph := NewNeighborGraph()
@@ -85,8 +85,8 @@ func TestResolveWithContext_AffinityTooClose_FallsToGeo(t *testing.T) {
func TestResolveWithContext_GPSPreference(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
})
ni, confidence, _ := pm.resolveWithContext("a1", nil, nil)
@@ -100,8 +100,8 @@ func TestResolveWithContext_GPSPreference(t *testing.T) {
func TestResolveWithContext_FirstMatchFallback(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "First"},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Second"},
{PublicKey: "a1aaaaaa", Name: "First"},
{PublicKey: "a1bbbbbb", Name: "Second"},
})
ni, confidence, _ := pm.resolveWithContext("a1", nil, nil)
@@ -115,8 +115,8 @@ func TestResolveWithContext_FirstMatchFallback(t *testing.T) {
func TestResolveWithContext_NilGraphFallsToGPS(t *testing.T) {
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
})
ni, confidence, _ := pm.resolveWithContext("a1", []string{"someone"}, nil)
@@ -131,8 +131,8 @@ func TestResolveWithContext_NilGraphFallsToGPS(t *testing.T) {
func TestResolveWithContext_BackwardCompatResolve(t *testing.T) {
// Verify original resolve() still works unchanged
pm := buildPrefixMap([]nodeInfo{
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
})
ni := pm.resolve("a1")
if ni == nil || ni.Name != "HasGPS" {
@@ -164,8 +164,8 @@ func TestResolveHopsAPI_UniquePrefix(t *testing.T) {
_ = srv
// Insert a unique node
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"ff11223344", "UniqueNode", 37.0, -122.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"ff11223344", "UniqueNode", 37.0, -122.0)
srv.store.InvalidateNodeCache()
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ff11223344", nil)
@@ -189,10 +189,10 @@ func TestResolveHopsAPI_UniquePrefix(t *testing.T) {
func TestResolveHopsAPI_AmbiguousNoContext(t *testing.T) {
srv, router := setupTestServer(t)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"ee1aaaaaaa", "Node-E1", 37.0, -122.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"ee1bbbbbbb", "Node-E2", 38.0, -121.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"ee1aaaaaaa", "Node-E1", 37.0, -122.0)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"ee1bbbbbbb", "Node-E2", 38.0, -121.0)
srv.store.InvalidateNodeCache()
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ee1", nil)
@@ -224,12 +224,12 @@ func TestResolveHopsAPI_AmbiguousNoContext(t *testing.T) {
func TestResolveHopsAPI_WithAffinityContext(t *testing.T) {
srv, router := setupTestServer(t)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"dd1aaaaaaa", "Node-D1", 37.0, -122.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"dd1bbbbbbb", "Node-D2", 38.0, -121.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"c0c0c0c0c0", "Context", 37.1, -122.1, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"dd1aaaaaaa", "Node-D1", 37.0, -122.0)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"dd1bbbbbbb", "Node-D2", 38.0, -121.0)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"c0c0c0c0c0", "Context", 37.1, -122.1)
// Invalidate node cache so the PM includes newly inserted nodes.
srv.store.cacheMu.Lock()
@@ -279,8 +279,8 @@ func TestResolveHopsAPI_WithAffinityContext(t *testing.T) {
func TestResolveHopsAPI_ResponseShape(t *testing.T) {
srv, router := setupTestServer(t)
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
"bb1aaaaaaa", "Node-B1", 37.0, -122.0, "repeater")
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
"bb1aaaaaaa", "Node-B1", 37.0, -122.0)
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=bb1a", nil)
rr := httptest.NewRecorder()
-475
View File
@@ -1,475 +0,0 @@
package main
// Lock ordering contract (MUST be followed everywhere):
//
// s.mu → s.lruMu (s.mu is the outer lock, lruMu is the inner lock)
//
// • Never acquire s.lruMu while holding s.mu.
// • fetchResolvedPathForObs takes lruMu independently — callers under s.mu
// must NOT call it directly; instead collect IDs under s.mu, release, then
// do LRU ops under lruMu separately.
// • The backfill path (backfillResolvedPathsAsync) follows this by collecting
// obsIDs to invalidate under s.mu, releasing it, then taking lruMu.
import (
"database/sql"
"hash/fnv"
"log"
"strings"
)
// resolvedPubkeyHash computes a fast 64-bit hash for membership index keying.
// Uses FNV-1a from stdlib — good distribution, no external dependency.
func resolvedPubkeyHash(pk string) uint64 {
h := fnv.New64a()
h.Write([]byte(strings.ToLower(pk)))
return h.Sum64()
}
// addToResolvedPubkeyIndex records txID under the hash of each resolved
// pubkey. Duplicates are suppressed both within one call and across calls:
// the same (hash, txID) pair is never inserted twice.
// Must be called under s.mu write lock.
func (s *PacketStore) addToResolvedPubkeyIndex(txID int, resolvedPubkeys []string) {
	if !s.useResolvedPathIndex {
		return
	}
	handled := make(map[uint64]bool, len(resolvedPubkeys))
	for _, pk := range resolvedPubkeys {
		if pk == "" {
			continue
		}
		h := resolvedPubkeyHash(pk)
		if handled[h] {
			continue // within-call dedup
		}
		handled[h] = true
		// Cross-call dedup: skip if (h, txID) is already in the forward index.
		ids := s.resolvedPubkeyIndex[h]
		duplicate := false
		for _, existing := range ids {
			if existing == txID {
				duplicate = true
				break
			}
		}
		if duplicate {
			continue
		}
		s.resolvedPubkeyIndex[h] = append(ids, txID)
		s.resolvedPubkeyReverse[txID] = append(s.resolvedPubkeyReverse[txID], h)
	}
}
// removeFromResolvedPubkeyIndex drops every index entry for txID, using the
// reverse map to find the affected hashes.
// Must be called under s.mu write lock.
func (s *PacketStore) removeFromResolvedPubkeyIndex(txID int) {
	if !s.useResolvedPathIndex {
		return
	}
	for _, h := range s.resolvedPubkeyReverse[txID] {
		ids := s.resolvedPubkeyIndex[h]
		// Filter out every occurrence of txID (not just the first) in place,
		// reusing the slice's backing array, so no orphans remain.
		kept := ids[:0]
		for _, id := range ids {
			if id != txID {
				kept = append(kept, id)
			}
		}
		if len(kept) > 0 {
			s.resolvedPubkeyIndex[h] = kept
		} else {
			delete(s.resolvedPubkeyIndex, h)
		}
	}
	delete(s.resolvedPubkeyReverse, txID)
}
// extractResolvedPubkeys returns every non-nil, non-empty pubkey from a
// resolved path, preserving order. Returns nil for an empty input.
func extractResolvedPubkeys(rp []*string) []string {
	if len(rp) == 0 {
		return nil
	}
	out := make([]string, 0, len(rp))
	for _, entry := range rp {
		if entry == nil || *entry == "" {
			continue
		}
		out = append(out, *entry)
	}
	return out
}
// mergeResolvedPubkeys flattens multiple resolved paths into one slice of
// unique, non-empty pubkeys in first-seen order.
func mergeResolvedPubkeys(paths ...[]*string) []string {
	seen := make(map[string]bool)
	var merged []string
	for _, path := range paths {
		for _, entry := range path {
			if entry == nil || *entry == "" {
				continue
			}
			if seen[*entry] {
				continue
			}
			seen[*entry] = true
			merged = append(merged, *entry)
		}
	}
	return merged
}
// nodeInResolvedPathViaIndex reports whether a transmission is associated
// with targetPK, using the membership index plus a collision-safety SQL
// confirmation. Unindexed transmissions are kept (conservative), matching the
// old NULL-resolved_path behavior.
// Must be called under s.mu RLock at minimum.
func (s *PacketStore) nodeInResolvedPathViaIndex(tx *StoreTx, targetPK string) bool {
	if !s.useResolvedPathIndex {
		return true // flag off: can't disambiguate, keep candidate
	}
	if _, indexed := s.resolvedPubkeyReverse[tx.ID]; !indexed {
		// No indexed pubkeys for this tx at all — keep it (conservative).
		return true
	}
	for _, id := range s.resolvedPubkeyIndex[resolvedPubkeyHash(targetPK)] {
		if id != tx.ID {
			continue
		}
		// Index hit. Collision-safety: confirm via SQL when a DB is attached.
		if s.db != nil && s.db.conn != nil {
			return s.confirmResolvedPathContains(tx.ID, targetPK)
		}
		return true // no DB, trust the index
	}
	return false
}
// confirmResolvedPathContains checks via SQL that the exact pubkey appears in
// some observation's resolved_path for txID. This is the collision-safety
// backstop for the 64-bit membership index; on any error it keeps the
// candidate (returns true).
func (s *PacketStore) confirmResolvedPathContains(txID int, pubkey string) bool {
	if s.db == nil || s.db.conn == nil {
		return true
	}
	// resolved_path is stored as ["pubkey1","pubkey2",...]; wrapping the
	// needle in quotes gives an exact match via INSTR without LIKE escaping.
	needle := `"` + strings.ToLower(pubkey) + `"`
	row := s.db.conn.QueryRow(
		`SELECT COUNT(*) FROM observations WHERE transmission_id = ? AND INSTR(LOWER(resolved_path), ?) > 0`,
		txID, needle,
	)
	var matches int
	if err := row.Scan(&matches); err != nil {
		return true // on error, keep the candidate
	}
	return matches > 0
}
// fetchResolvedPathsForTx loads resolved_path for every observation of a
// transmission from SQLite, keyed by observation ID. Used for on-demand API
// responses and eviction cleanup. Returns nil when no DB is attached or the
// query fails.
func (s *PacketStore) fetchResolvedPathsForTx(txID int) map[int][]*string {
	if s.db == nil || s.db.conn == nil {
		return nil
	}
	rows, err := s.db.conn.Query(
		`SELECT id, resolved_path FROM observations WHERE transmission_id = ? AND resolved_path IS NOT NULL`,
		txID,
	)
	if err != nil {
		return nil
	}
	defer rows.Close()
	out := make(map[int][]*string)
	for rows.Next() {
		var (
			obsID int
			raw   sql.NullString
		)
		if scanErr := rows.Scan(&obsID, &raw); scanErr != nil {
			continue // skip unreadable rows, keep the rest
		}
		if raw.Valid && raw.String != "" {
			out[obsID] = unmarshalResolvedPath(raw.String)
		}
	}
	return out
}
// fetchResolvedPathForObs fetches resolved_path for a single observation,
// using the LRU cache.
//
// Lock ordering: this takes s.lruMu independently — per the contract at the
// top of the file, callers holding s.mu must NOT call it directly.
func (s *PacketStore) fetchResolvedPathForObs(obsID int) []*string {
	if s.db == nil || s.db.conn == nil {
		return nil
	}
	// Check LRU cache first (read lock only on the cache mutex).
	s.lruMu.RLock()
	if s.apiResolvedPathLRU != nil {
		if entry, ok := s.apiResolvedPathLRU[obsID]; ok {
			s.lruMu.RUnlock()
			return entry
		}
	}
	s.lruMu.RUnlock()
	// Cache miss: read the single row from SQLite.
	var rpJSON sql.NullString
	err := s.db.conn.QueryRow(
		`SELECT resolved_path FROM observations WHERE id = ?`, obsID,
	).Scan(&rpJSON)
	if err != nil || !rpJSON.Valid {
		// Missing row, NULL resolved_path, and query errors all map to nil.
		return nil
	}
	rp := unmarshalResolvedPath(rpJSON.String)
	// Store in LRU. Between RUnlock above and Lock here another goroutine may
	// have cached the same key; lruPut is a no-op for existing entries.
	s.lruMu.Lock()
	s.lruPut(obsID, rp)
	s.lruMu.Unlock()
	return rp
}
// fetchResolvedPathForTxBest returns the best observation's resolved_path for a tx.
//
// "Best" = the longest path_json among observations that actually have a stored
// resolved_path. Earlier versions picked the longest-path obs unconditionally
// and queried SQL for that single ID — if the longest-path obs had NULL
// resolved_path while a shorter sibling had one, the call returned nil and
// callers (e.g. /api/nodes/{pk}/health.recentPackets) lost the field. Fixes
// #810 by checking all observations and falling back to the longest sibling
// that has a stored path.
func (s *PacketStore) fetchResolvedPathForTxBest(tx *StoreTx) []*string {
	if tx == nil || len(tx.Observations) == 0 {
		return nil
	}
	// Fast path: try the longest-path obs first via the LRU/SQL helper.
	longest := tx.Observations[0]
	longestLen := pathLen(longest.PathJSON)
	for _, obs := range tx.Observations[1:] {
		if l := pathLen(obs.PathJSON); l > longestLen {
			longest = obs
			longestLen = l
		}
	}
	if rp := s.fetchResolvedPathForObs(longest.ID); rp != nil {
		return rp
	}
	// Fallback: longest-path obs has no stored resolved_path. Query all
	// observations for this tx and pick the one with the longest path_json
	// that actually has a stored resolved_path.
	rpMap := s.fetchResolvedPathsForTx(tx.ID)
	if len(rpMap) == 0 {
		return nil
	}
	var bestRP []*string
	bestObsID := 0
	bestLen := -1 // -1 so an observation with an empty path_json can still win
	for _, obs := range tx.Observations {
		rp, ok := rpMap[obs.ID]
		if !ok || rp == nil {
			continue
		}
		// Strict > keeps the first observation on path-length ties.
		if l := pathLen(obs.PathJSON); l > bestLen {
			bestLen = l
			bestRP = rp
			bestObsID = obs.ID
		}
	}
	// Populate LRU so repeat lookups for this tx don't re-issue the multi-row
	// SQL fallback (e.g. dashboard polling /api/nodes/{pk}/health).
	if bestRP != nil && bestObsID != 0 {
		s.lruMu.Lock()
		s.lruPut(bestObsID, bestRP)
		s.lruMu.Unlock()
	}
	return bestRP
}
// --- Simple LRU cache for resolved paths ---

// lruMaxSize caps both the cache map and the insertion-order queue.
const lruMaxSize = 10000

// lruPut adds an entry. Must be called under s.lruMu write lock.
//
// NOTE(review): eviction is insertion-order (FIFO), not strict LRU — an
// existing entry is not moved to the back on re-put, and reads never refresh
// order. Confirm that approximation is intended.
func (s *PacketStore) lruPut(obsID int, rp []*string) {
	if s.apiResolvedPathLRU == nil {
		return
	}
	if _, exists := s.apiResolvedPathLRU[obsID]; exists {
		return // already cached; order deliberately not refreshed
	}
	// Compact lruOrder if stale entries exceed 50% of capacity.
	// This prevents effective capacity degradation after bulk deletions
	// (lruDelete removes map entries but leaves their IDs in lruOrder).
	if len(s.lruOrder) >= lruMaxSize && len(s.apiResolvedPathLRU) < lruMaxSize/2 {
		compacted := make([]int, 0, len(s.apiResolvedPathLRU))
		for _, id := range s.lruOrder {
			if _, ok := s.apiResolvedPathLRU[id]; ok {
				compacted = append(compacted, id)
			}
		}
		s.lruOrder = compacted
	}
	if len(s.lruOrder) >= lruMaxSize {
		// Evict oldest, skipping stale entries
		for len(s.lruOrder) > 0 {
			evictID := s.lruOrder[0]
			s.lruOrder = s.lruOrder[1:]
			if _, ok := s.apiResolvedPathLRU[evictID]; ok {
				delete(s.apiResolvedPathLRU, evictID)
				break
			}
			// stale entry — skip and continue
		}
	}
	s.apiResolvedPathLRU[obsID] = rp
	s.lruOrder = append(s.lruOrder, obsID)
}
// lruDelete removes a cached entry. Must be called under s.lruMu write lock.
// lruOrder is intentionally left untouched — stale IDs are skipped (and
// periodically compacted) by the eviction logic in lruPut.
func (s *PacketStore) lruDelete(obsID int) {
	if s.apiResolvedPathLRU == nil {
		return
	}
	delete(s.apiResolvedPathLRU, obsID)
}
// resolvedPubkeysForEvictionBatch fetches resolved pubkeys for multiple txIDs
// from SQL in a single batched query. Returns a map from txID to unique pubkeys.
// MUST be called WITHOUT holding s.mu — this is the whole point of the batch approach.
// Chunks queries to stay well under SQLite's default 999-variable limit.
func (s *PacketStore) resolvedPubkeysForEvictionBatch(txIDs []int) map[int][]string {
	result := make(map[int][]string, len(txIDs))
	if len(txIDs) == 0 || s.db == nil || s.db.conn == nil {
		return result
	}
	const chunkSize = 499 // SQLite SQLITE_MAX_VARIABLE_NUMBER default is 999; stay well under
	for start := 0; start < len(txIDs); start += chunkSize {
		end := start + chunkSize
		if end > len(txIDs) {
			end = len(txIDs)
		}
		chunk := txIDs[start:end]
		// Build query with a "?,?,..." placeholder list for this chunk.
		placeholders := make([]byte, 0, len(chunk)*2)
		args := make([]interface{}, len(chunk))
		for i, id := range chunk {
			if i > 0 {
				placeholders = append(placeholders, ',')
			}
			placeholders = append(placeholders, '?')
			args[i] = id
		}
		query := "SELECT transmission_id, resolved_path FROM observations WHERE transmission_id IN (" +
			string(placeholders) + ") AND resolved_path IS NOT NULL"
		rows, err := s.db.conn.Query(query, args...)
		if err != nil {
			// Best-effort: a failed chunk is skipped rather than aborting the batch.
			continue
		}
		for rows.Next() {
			var txID int
			var rpJSON sql.NullString
			if err := rows.Scan(&txID, &rpJSON); err != nil {
				continue
			}
			if !rpJSON.Valid || rpJSON.String == "" {
				continue
			}
			rp := unmarshalResolvedPath(rpJSON.String)
			for _, p := range rp {
				if p != nil && *p != "" {
					result[txID] = append(result[txID], *p)
				}
			}
		}
		// NOTE(review): rows.Err() is not checked — a mid-iteration failure
		// silently truncates this chunk's results. Confirm that's acceptable
		// for best-effort eviction cleanup.
		rows.Close()
	}
	// Deduplicate per-txID in place (reuses each slice's backing array),
	// preserving first-seen order.
	for txID, pks := range result {
		seen := make(map[string]bool, len(pks))
		deduped := pks[:0]
		for _, pk := range pks {
			if !seen[pk] {
				seen[pk] = true
				deduped = append(deduped, pk)
			}
		}
		result[txID] = deduped
	}
	return result
}
// initResolvedPathIndex allocates the resolved-path index maps and the LRU
// cache structures used by the membership index.
func (s *PacketStore) initResolvedPathIndex() {
	const indexHint = 4096
	s.resolvedPubkeyIndex = make(map[uint64][]int, indexHint)
	s.resolvedPubkeyReverse = make(map[int][]uint64, indexHint)
	s.apiResolvedPathLRU = make(map[int][]*string, lruMaxSize)
	s.lruOrder = make([]int, 0, lruMaxSize)
}
// CompactResolvedPubkeyIndex reclaims memory from the resolved pubkey index
// maps after eviction: empty forward/reverse entries are deleted (defense in
// depth — removeFromResolvedPubkeyIndex should prevent them), and slices
// whose backing array is far larger than their length are reallocated.
// Must be called under s.mu write lock.
func (s *PacketStore) CompactResolvedPubkeyIndex() {
	if !s.useResolvedPathIndex {
		return
	}
	for h, ids := range s.resolvedPubkeyIndex {
		switch {
		case len(ids) == 0:
			delete(s.resolvedPubkeyIndex, h)
		case cap(ids) > 2*len(ids)+8:
			// Clip the oversized backing array with a right-sized copy.
			fresh := make([]int, len(ids))
			copy(fresh, ids)
			s.resolvedPubkeyIndex[h] = fresh
		}
	}
	for txID, hashes := range s.resolvedPubkeyReverse {
		switch {
		case len(hashes) == 0:
			delete(s.resolvedPubkeyReverse, txID)
		case cap(hashes) > 2*len(hashes)+8:
			fresh := make([]uint64, len(hashes))
			copy(fresh, hashes)
			s.resolvedPubkeyReverse[txID] = fresh
		}
	}
}
// defaultMaxResolvedPubkeyIndexEntries is the default hard cap for the
// forward index. Exceeding it only logs a warning — auto-eviction stays the
// eviction ticker's responsibility.
const defaultMaxResolvedPubkeyIndexEntries = 5_000_000

// CheckResolvedPubkeyIndexSize warns when either index map outgrows the
// configured (or default) entry limit. Must be called under s.mu read lock
// at minimum.
func (s *PacketStore) CheckResolvedPubkeyIndexSize() {
	if !s.useResolvedPathIndex {
		return
	}
	limit := s.maxResolvedPubkeyIndexEntries
	if limit <= 0 {
		limit = defaultMaxResolvedPubkeyIndexEntries
	}
	forward := len(s.resolvedPubkeyIndex)
	reverse := len(s.resolvedPubkeyReverse)
	if forward > limit || reverse > limit {
		log.Printf("[store] WARNING: resolvedPubkeyIndex size exceeds limit — forward=%d reverse=%d limit=%d",
			forward, reverse, limit)
	}
}
File diff suppressed because it is too large Load Diff
+15 -99
View File
@@ -16,7 +16,6 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/meshcore-analyzer/packetpath"
)
// Server holds shared state for route handlers.
@@ -570,16 +569,6 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
backfillProgress = 1
}
// Memory accounting (#832). storeDataMB is the in-store packet byte
// estimate (the old "trackedMB"); processRSSMB / goHeapInuseMB / goSysMB
// give ops the breakdown needed to reason about real RSS. All values
// share a single 1s-cached snapshot to amortize ReadMemStats cost.
var storeDataMB float64
if s.store != nil {
storeDataMB = s.store.trackedMemoryMB()
}
mem := s.getMemorySnapshot(storeDataMB)
resp := &StatsResponse{
TotalPackets: stats.TotalPackets,
TotalTransmissions: &stats.TotalTransmissions,
@@ -603,12 +592,6 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
BackfillProgress: backfillProgress,
SignatureDrops: s.db.GetSignatureDropCount(),
HashMigrationComplete: s.store != nil && s.store.hashMigrationComplete.Load(),
TrackedMB: mem.StoreDataMB, // deprecated alias
StoreDataMB: mem.StoreDataMB,
ProcessRSSMB: mem.ProcessRSSMB,
GoHeapInuseMB: mem.GoHeapInuseMB,
GoSysMB: mem.GoSysMB,
}
s.statsMu.Lock()
@@ -791,7 +774,6 @@ func (s *Server) handlePackets(w http.ResponseWriter, r *http.Request) {
Until: r.URL.Query().Get("until"),
Region: r.URL.Query().Get("region"),
Node: r.URL.Query().Get("node"),
Channel: r.URL.Query().Get("channel"),
Order: "DESC",
ExpandObservations: r.URL.Query().Get("expand") == "observations",
}
@@ -894,11 +876,9 @@ func (s *Server) handleBatchObservations(w http.ResponseWriter, r *http.Request)
func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
param := mux.Vars(r)["id"]
var packet map[string]interface{}
fromDB := false
isHash := hashPattern.MatchString(strings.ToLower(param))
if s.store != nil {
if isHash {
if hashPattern.MatchString(strings.ToLower(param)) {
packet = s.store.GetPacketByHash(param)
}
if packet == nil {
@@ -911,25 +891,6 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
}
}
}
// DB fallback: in-memory PacketStore prunes old entries, but the SQLite
// DB retains them and is the source for /api/nodes recentAdverts. Without
// this fallback, links from node-detail pages 404 once the packet ages out.
if packet == nil && s.db != nil {
if isHash {
if dbPkt, err := s.db.GetPacketByHash(param); err == nil && dbPkt != nil {
packet = dbPkt
fromDB = true
}
}
if packet == nil {
if id, parseErr := strconv.Atoi(param); parseErr == nil {
if dbPkt, err := s.db.GetTransmissionByID(id); err == nil && dbPkt != nil {
packet = dbPkt
fromDB = true
}
}
}
}
if packet == nil {
writeError(w, 404, "Not found")
return
@@ -940,9 +901,6 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
if s.store != nil {
observations = s.store.GetObservationsForHash(hash)
}
if len(observations) == 0 && fromDB && s.db != nil && hash != "" {
observations = s.db.GetObservationsForHash(hash)
}
observationCount := len(observations)
if observationCount == 0 {
observationCount = 1
@@ -958,9 +916,11 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
pathHops = []interface{}{}
}
rawHex, _ := packet["raw_hex"].(string)
writeJSON(w, PacketDetailResponse{
Packet: packet,
Path: pathHops,
Breakdown: BuildBreakdown(rawHex),
ObservationCount: observationCount,
Observations: mapSliceToObservations(observations),
})
@@ -1019,17 +979,8 @@ func (s *Server) handlePostPacket(w http.ResponseWriter, r *http.Request) {
contentHash := ComputeContentHash(hexStr)
pathJSON := "[]"
// For TRACE packets, path_json must be the payload-decoded route hops
// (decoded.Path.Hops), NOT the raw_hex header bytes which are SNR values.
// For all other packet types, derive path from raw_hex (#886).
if !packetpath.PathBytesAreHops(byte(decoded.Header.PayloadType)) {
if len(decoded.Path.Hops) > 0 {
if pj, e := json.Marshal(decoded.Path.Hops); e == nil {
pathJSON = string(pj)
}
}
} else if hops, err := packetpath.DecodePathFromRawHex(hexStr); err == nil && len(hops) > 0 {
if pj, e := json.Marshal(hops); e == nil {
if len(decoded.Path.Hops) > 0 {
if pj, e := json.Marshal(decoded.Path.Hops); e == nil {
pathJSON = string(pj)
}
}
@@ -1282,52 +1233,14 @@ func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
// Post-filter: verify target node actually appears in each candidate's resolved_path.
// The byPathHop index uses short prefixes which can collide (e.g. "c0" matches multiple nodes).
// We lean on resolved_path (from neighbor affinity graph) to disambiguate.
//
// Collect candidate IDs and index membership under the read lock, then release
// the lock before running SQL queries (confirmResolvedPathContains does disk I/O).
type candidateCheck struct {
tx *StoreTx
hasReverse bool
inIndex bool
}
checks := make([]candidateCheck, len(candidates))
for i, tx := range candidates {
cc := candidateCheck{tx: tx}
if !s.store.useResolvedPathIndex {
cc.inIndex = true // flag off — keep all
} else if _, hasRev := s.store.resolvedPubkeyReverse[tx.ID]; !hasRev {
cc.inIndex = true // no indexed pubkeys — keep (conservative)
} else {
h := resolvedPubkeyHash(lowerPK)
for _, id := range s.store.resolvedPubkeyIndex[h] {
if id == tx.ID {
cc.hasReverse = true // needs SQL confirmation
break
}
}
// If not in index at all, it's a definite no
filtered := candidates[:0] // reuse backing array
for _, tx := range candidates {
if nodeInResolvedPath(tx, lowerPK) {
filtered = append(filtered, tx)
}
checks[i] = cc
}
s.store.mu.RUnlock()
// Now run SQL checks outside the lock for candidates that need confirmation.
filtered := candidates[:0]
for _, cc := range checks {
if cc.inIndex {
filtered = append(filtered, cc.tx)
} else if cc.hasReverse {
if s.store.confirmResolvedPathContains(cc.tx.ID, lowerPK) {
filtered = append(filtered, cc.tx)
}
}
// else: not in index → exclude
}
candidates = filtered
// Re-acquire read lock for the aggregation phase that reads store data.
s.store.mu.RLock()
type pathAgg struct {
Hops []PathHopResp
Count int
@@ -2374,6 +2287,9 @@ func mapSliceToTransmissions(maps []map[string]interface{}) []TransmissionResp {
tx.PathJSON = m["path_json"]
tx.Direction = m["direction"]
tx.Score = m["score"]
if rp, ok := m["resolved_path"].([]*string); ok {
tx.ResolvedPath = rp
}
result = append(result, tx)
}
return result
@@ -2394,10 +2310,10 @@ func mapSliceToObservations(maps []map[string]interface{}) []ObservationResp {
obs.SNR = m["snr"]
obs.RSSI = m["rssi"]
obs.PathJSON = m["path_json"]
obs.ResolvedPath = m["resolved_path"]
obs.Direction = m["direction"]
obs.RawHex = m["raw_hex"]
obs.Timestamp = m["timestamp"]
if rp, ok := m["resolved_path"].([]*string); ok {
obs.ResolvedPath = rp
}
result = append(result, obs)
}
return result
+41 -205
View File
@@ -3681,55 +3681,67 @@ func TestNodePathsPrefixCollisionFilter(t *testing.T) {
func TestNodeInResolvedPath(t *testing.T) {
target := "aabbccdd11223344"
// After #800, nodeInResolvedPath is replaced by nodeInResolvedPathViaIndex
// which uses the membership index. Test the index-based approach.
store := &PacketStore{
byNode: make(map[string][]*StoreTx),
nodeHashes: make(map[string]map[string]bool),
useResolvedPathIndex: true,
}
store.initResolvedPathIndex()
// Case 1: tx indexed with target pubkey
tx1 := &StoreTx{ID: 1}
store.addToResolvedPubkeyIndex(1, []string{target})
if !store.nodeInResolvedPathViaIndex(tx1, target) {
t.Error("should match when index contains target")
// Case 1: tx.ResolvedPath contains target
pk := "aabbccdd11223344"
tx1 := &StoreTx{ResolvedPath: []*string{&pk}}
if !nodeInResolvedPath(tx1, target) {
t.Error("should match when ResolvedPath contains target")
}
// Case 2: tx indexed with different pubkey
tx2 := &StoreTx{ID: 2}
store.addToResolvedPubkeyIndex(2, []string{"aacafe0000000000"})
if store.nodeInResolvedPathViaIndex(tx2, target) {
t.Error("should not match when index contains different node")
// Case 2: tx.ResolvedPath contains different node
other := "aacafe0000000000"
tx2 := &StoreTx{ResolvedPath: []*string{&other}}
if nodeInResolvedPath(tx2, target) {
t.Error("should not match when ResolvedPath contains different node")
}
// Case 3: tx not in index at all — should match (no data to disambiguate)
tx3 := &StoreTx{ID: 3}
if !store.nodeInResolvedPathViaIndex(tx3, target) {
t.Error("should match when tx has no index entries (no data to disambiguate)")
// Case 3: nil ResolvedPath — should match (no data to disambiguate, keep it)
tx3 := &StoreTx{}
if !nodeInResolvedPath(tx3, target) {
t.Error("should match when ResolvedPath is nil (no data to disambiguate)")
}
// Case 4: ResolvedPath with nil elements only — has data but no match
tx4 := &StoreTx{ResolvedPath: []*string{nil, nil}}
if nodeInResolvedPath(tx4, target) {
t.Error("should not match when all ResolvedPath elements are nil")
}
// Case 5: target in observation but not in tx.ResolvedPath
tx5 := &StoreTx{
ResolvedPath: []*string{&other},
Observations: []*StoreObs{
{ResolvedPath: []*string{&pk}},
},
}
if !nodeInResolvedPath(tx5, target) {
t.Error("should match when observation's ResolvedPath contains target")
}
}
func TestPathHopIndexIncrementalUpdate(t *testing.T) {
// After #800, addTxToPathHopIndex only indexes raw hops (not resolved pubkeys).
// Resolved pubkeys are handled by the resolved pubkey membership index.
// Test that addTxToPathHopIndex and removeTxFromPathHopIndex work correctly
idx := make(map[string][]*StoreTx)
pk1 := "fullpubkey1"
tx1 := &StoreTx{
ID: 1,
PathJSON: `["ab","cd"]`,
ResolvedPath: []*string{&pk1, nil},
}
addTxToPathHopIndex(idx, tx1)
// Should be indexed under "ab" and "cd" only (no resolved pubkey)
// Should be indexed under "ab", "cd", and "fullpubkey1"
if len(idx["ab"]) != 1 {
t.Errorf("expected 1 entry for 'ab', got %d", len(idx["ab"]))
}
if len(idx["cd"]) != 1 {
t.Errorf("expected 1 entry for 'cd', got %d", len(idx["cd"]))
}
if len(idx["fullpubkey1"]) != 1 {
t.Errorf("expected 1 entry for resolved pubkey, got %d", len(idx["fullpubkey1"]))
}
// Add another tx with overlapping hop
tx2 := &StoreTx{
@@ -3754,6 +3766,9 @@ func TestPathHopIndexIncrementalUpdate(t *testing.T) {
if _, ok := idx["cd"]; ok {
t.Error("expected 'cd' key to be deleted after removal")
}
if _, ok := idx["fullpubkey1"]; ok {
t.Error("expected resolved pubkey key to be deleted after removal")
}
}
func TestMetricsAPIEndpoints(t *testing.T) {
@@ -3793,182 +3808,3 @@ func TestMetricsAPIEndpoints(t *testing.T) {
t.Errorf("expected 1 observer in summary, got %v", resp2["observers"])
}
}
// TestNodeHealth_RecentPackets_ResolvedPath verifies that recentPackets in the
// node health endpoint include resolved_path (regression for Codex review item #2).
func TestNodeHealth_RecentPackets_ResolvedPath(t *testing.T) {
	_, router := setupTestServer(t)
	// Query the health endpoint for a node seeded by setupTestServer.
	req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/health", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("json decode: %v", err)
	}
	// recentPackets must be present and non-empty before field assertions make sense.
	rp, ok := body["recentPackets"].([]interface{})
	if !ok || len(rp) == 0 {
		t.Fatal("expected non-empty recentPackets")
	}
	// At least one packet should have resolved_path (tx 1 has observations with resolved_path)
	found := false
	for _, p := range rp {
		pm, ok := p.(map[string]interface{})
		if !ok {
			continue // defensively skip any non-object entries
		}
		if pm["resolved_path"] != nil {
			found = true
			break
		}
	}
	if !found {
		t.Error("expected at least one recentPacket with resolved_path")
	}
}
// TestPacketsExpand_ResolvedPath verifies that expandObservations=true includes
// resolved_path on expanded observations (regression for Codex review item #3).
func TestPacketsExpand_ResolvedPath(t *testing.T) {
	_, router := setupTestServer(t)
	// Request the packet list with observations expanded inline.
	req := httptest.NewRequest("GET", "/api/packets?expand=observations&limit=10", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("json decode: %v", err)
	}
	packets, ok := body["packets"].([]interface{})
	if !ok || len(packets) == 0 {
		t.Fatal("expected non-empty packets")
	}
	// Find a packet with observations that should have resolved_path
	found := false
	for _, p := range packets {
		pm, ok := p.(map[string]interface{})
		if !ok {
			continue // skip non-object entries
		}
		obs, ok := pm["observations"].([]interface{})
		if !ok {
			continue // packet without expanded observations
		}
		for _, o := range obs {
			om, ok := o.(map[string]interface{})
			if !ok {
				continue
			}
			if om["resolved_path"] != nil {
				found = true
				break
			}
		}
		if found {
			break
		}
	}
	if !found {
		t.Error("expected at least one expanded observation with resolved_path")
	}
}
// TestPacketDetailFallsBackToDBWhenStoreMisses verifies that handlePacketDetail
// serves transmissions present in the DB but absent from the in-memory store.
// This is the recentAdverts → "Not found" bug (#827).
func TestPacketDetailFallsBackToDBWhenStoreMisses(t *testing.T) {
	srv, router := setupTestServer(t)
	// Insert a transmission directly into the DB AFTER store.Load(), so the
	// in-memory PacketStore won't see it. Mirrors the production case where
	// the store has pruned an entry but the DB still has it.
	const dbOnlyHash = "deadbeef00112233"
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := srv.db.conn.Exec(`INSERT INTO transmissions
	(raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
	VALUES ('FFEE', ?, ?, 1, 4, '{"type":"ADVERT"}')`, dbOnlyHash, now); err != nil {
		t.Fatalf("insert: %v", err)
	}
	// Fetch the auto-assigned ID so the observation row can reference it.
	var txID int
	if err := srv.db.conn.QueryRow("SELECT id FROM transmissions WHERE hash = ?", dbOnlyHash).Scan(&txID); err != nil {
		t.Fatalf("lookup tx id: %v", err)
	}
	if _, err := srv.db.conn.Exec(`INSERT INTO observations
	(transmission_id, observer_idx, snr, rssi, path_json, timestamp)
	VALUES (?, 1, 7.5, -99, '[]', ?)`, txID, time.Now().Unix()); err != nil {
		t.Fatalf("insert obs: %v", err)
	}
	// Confirm the store really doesn't have it (precondition for the fix).
	if got := srv.store.GetPacketByHash(dbOnlyHash); got != nil {
		t.Fatalf("test precondition failed: store unexpectedly has %s", dbOnlyHash)
	}
	req := httptest.NewRequest("GET", "/api/packets/"+dbOnlyHash, nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatal(err)
	}
	pkt, ok := body["packet"].(map[string]interface{})
	if !ok {
		t.Fatal("expected packet object")
	}
	if pkt["hash"] != dbOnlyHash {
		t.Errorf("expected hash %s, got %v", dbOnlyHash, pkt["hash"])
	}
	// Observations fallback should populate from DB too.
	obs, _ := body["observations"].([]interface{})
	if len(obs) == 0 {
		t.Errorf("expected DB observations to be returned, got 0")
	}
}
// TestPacketDetail404WhenAbsentFromBoth verifies that a hash present in
// neither store nor DB still returns 404 (no false positives from the fallback).
func TestPacketDetail404WhenAbsentFromBoth(t *testing.T) {
	_, router := setupTestServer(t)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest("GET", "/api/packets/0011223344556677", nil))
	if rec.Code != 404 {
		t.Errorf("expected 404, got %d (body: %s)", rec.Code, rec.Body.String())
	}
}
// TestPacketDetailPrefersStoreOverDB verifies the store result wins when the
// hash exists in both — the DB fallback must not double-fetch / overwrite.
func TestPacketDetailPrefersStoreOverDB(t *testing.T) {
	srv, router := setupTestServer(t)
	// abc123def4567890 is seeded in both DB and (after Load) the store.
	const hash = "abc123def4567890"
	if got := srv.store.GetPacketByHash(hash); got == nil {
		t.Fatalf("test precondition failed: store should have %s", hash)
	}
	req := httptest.NewRequest("GET", "/api/packets/"+hash, nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	// Fix: the decode error was previously ignored; a malformed response would
	// have surfaced below as a confusing nil-map failure instead of here.
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("json decode: %v", err)
	}
	pkt, _ := body["packet"].(map[string]interface{})
	if pkt == nil || pkt["hash"] != hash {
		t.Fatalf("expected packet with hash %s, got %v", hash, pkt)
	}
	// observation_count comes from store observations (2 seeded for tx 1).
	if cnt, _ := body["observation_count"].(float64); cnt != 2 {
		t.Errorf("expected observation_count=2 (from store), got %v", body["observation_count"])
	}
}
-95
View File
@@ -1,95 +0,0 @@
package main
import (
"encoding/json"
"net/http/httptest"
"strings"
"testing"
)
// TestStatsMemoryFields verifies that /api/stats exposes the new memory
// breakdown introduced for issue #832: storeDataMB, processRSSMB,
// goHeapInuseMB, goSysMB, plus the deprecated trackedMB alias.
//
// We assert presence, type, sign, and ordering invariants — but NOT
// "RSS within X% of true RSS" because that is flaky in CI under cgo,
// containerization, and shared-runner load.
func TestStatsMemoryFields(t *testing.T) {
	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/stats", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("json decode: %v", err)
	}
	// Every field must be present, numeric (JSON numbers decode to float64),
	// and non-negative.
	required := []string{"trackedMB", "storeDataMB", "processRSSMB", "goHeapInuseMB", "goSysMB"}
	values := make(map[string]float64, len(required))
	for _, k := range required {
		v, ok := body[k]
		if !ok {
			t.Fatalf("missing field %q in /api/stats response", k)
		}
		f, ok := v.(float64)
		if !ok {
			t.Fatalf("field %q is %T, expected float64", k, v)
		}
		if f < 0 {
			t.Errorf("field %q is negative: %v", k, f)
		}
		values[k] = f
	}
	// trackedMB is a deprecated alias for storeDataMB; they must match.
	if values["trackedMB"] != values["storeDataMB"] {
		t.Errorf("trackedMB (%v) != storeDataMB (%v); they must remain aliased",
			values["trackedMB"], values["storeDataMB"])
	}
	// Ordering invariants. goSys is the runtime's view of total OS memory;
	// HeapInuse is a subset of it. storeData is a subset of HeapInuse.
	// processRSS may be 0 in environments without /proc — treat 0 as
	// "unknown" rather than a failure.
	if values["goHeapInuseMB"] > values["goSysMB"]+0.5 {
		t.Errorf("invariant violated: goHeapInuseMB (%v) > goSysMB (%v)",
			values["goHeapInuseMB"], values["goSysMB"])
	}
	if values["storeDataMB"] > values["goHeapInuseMB"]+0.5 && values["storeDataMB"] > 0 {
		// In the test fixture storeDataMB is typically 0 (no packets in
		// store); only enforce the bound when both are nonzero.
		t.Errorf("invariant violated: storeDataMB (%v) > goHeapInuseMB (%v)",
			values["storeDataMB"], values["goHeapInuseMB"])
	}
	if values["processRSSMB"] > 0 && values["goSysMB"] > 0 {
		// goSys can briefly exceed RSS if pages are reserved-but-not-touched,
		// so allow some slack.
		if values["goSysMB"] > values["processRSSMB"]*4 {
			t.Errorf("suspicious: goSysMB (%v) >> processRSSMB (%v)",
				values["goSysMB"], values["processRSSMB"])
		}
	}
}
// TestStatsMemoryFieldsRawJSON spot-checks that the JSON wire format uses
// the documented camelCase names (no accidental rename through struct tags).
func TestStatsMemoryFieldsRawJSON(t *testing.T) {
	_, router := setupTestServer(t)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest("GET", "/api/stats", nil))
	raw := rec.Body.String()
	wireKeys := []string{
		`"trackedMB":`, `"storeDataMB":`,
		`"processRSSMB":`, `"goHeapInuseMB":`, `"goSysMB":`,
	}
	for _, key := range wireKeys {
		if !strings.Contains(raw, key) {
			t.Errorf("missing %s in raw response: %s", key, raw)
		}
	}
}
+305 -534
View File
File diff suppressed because it is too large Load Diff
-116
View File
@@ -1,116 +0,0 @@
package main
import (
"testing"
)
func f64(v float64) *float64 { return &v }
// TestDedupeTopHopsByPair verifies the per-pair aggregation performed by
// dedupeHopsByPair: repeated records for the same (undirected) node pair
// collapse to one entry carrying the max distance, the hash of the max-dist
// record, bestSnr, medianSnr, and an obsCount of all merged samples.
func TestDedupeTopHopsByPair(t *testing.T) {
	hops := []distHopRecord{
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 100, Type: "R↔R", SNR: f64(5.0), Hash: "h1", Timestamp: "t1"},
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 90, Type: "R↔R", SNR: f64(8.0), Hash: "h2", Timestamp: "t2"},
		{FromPk: "BBB", ToPk: "AAA", FromName: "B", ToName: "A", Dist: 80, Type: "R↔R", SNR: f64(3.0), Hash: "h3", Timestamp: "t3"},
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 70, Type: "R↔R", SNR: f64(6.0), Hash: "h4", Timestamp: "t4"},
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 60, Type: "R↔R", SNR: f64(4.0), Hash: "h5", Timestamp: "t5"},
		{FromPk: "CCC", ToPk: "DDD", FromName: "C", ToName: "D", Dist: 50, Type: "C↔R", SNR: f64(7.0), Hash: "h6", Timestamp: "t6"},
	}
	result := dedupeHopsByPair(hops, 20)
	if len(result) != 2 {
		t.Fatalf("expected 2 entries, got %d", len(result))
	}
	// First entry: A↔B pair, max distance = 100, obsCount = 5
	ab := result[0]
	if ab["dist"].(float64) != 100 {
		t.Errorf("expected dist 100, got %v", ab["dist"])
	}
	if ab["obsCount"].(int) != 5 {
		t.Errorf("expected obsCount 5, got %v", ab["obsCount"])
	}
	if ab["hash"].(string) != "h1" {
		t.Errorf("expected hash h1 (from max-dist record), got %v", ab["hash"])
	}
	if ab["bestSnr"].(float64) != 8.0 {
		t.Errorf("expected bestSnr 8.0, got %v", ab["bestSnr"])
	}
	// medianSnr of [3,4,5,6,8] = 5.0
	if ab["medianSnr"].(float64) != 5.0 {
		t.Errorf("expected medianSnr 5.0, got %v", ab["medianSnr"])
	}
	// Second entry: C↔D pair
	cd := result[1]
	if cd["dist"].(float64) != 50 {
		t.Errorf("expected dist 50, got %v", cd["dist"])
	}
	if cd["obsCount"].(int) != 1 {
		t.Errorf("expected obsCount 1, got %v", cd["obsCount"])
	}
}
// TestDedupeTopHopsReversePairMerges checks that A→B and B→A records collapse
// into one undirected pair: observations are summed and the max distance wins.
func TestDedupeTopHopsReversePairMerges(t *testing.T) {
	reverse := distHopRecord{FromPk: "BBB", ToPk: "AAA", FromName: "B", ToName: "A", Dist: 50, Type: "R↔R", Hash: "h1"}
	forward := distHopRecord{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 80, Type: "R↔R", Hash: "h2"}
	got := dedupeHopsByPair([]distHopRecord{reverse, forward}, 20)
	if len(got) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(got))
	}
	entry := got[0]
	if entry["obsCount"].(int) != 2 {
		t.Errorf("expected obsCount 2, got %v", entry["obsCount"])
	}
	if entry["dist"].(float64) != 80 {
		t.Errorf("expected dist 80, got %v", entry["dist"])
	}
}
// TestDedupeTopHopsNilSNR verifies that pairs whose samples all lack an SNR
// reading produce nil bestSnr/medianSnr rather than a zero value.
func TestDedupeTopHopsNilSNR(t *testing.T) {
	pair := []distHopRecord{
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 100, Type: "R↔R", SNR: nil, Hash: "h1"},
		{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 90, Type: "R↔R", SNR: nil, Hash: "h2"},
	}
	got := dedupeHopsByPair(pair, 20)
	if len(got) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(got))
	}
	for _, key := range []string{"bestSnr", "medianSnr"} {
		if got[0][key] != nil {
			t.Errorf("expected %s nil, got %v", key, got[0][key])
		}
	}
}
// TestDedupeTopHopsLimit verifies the limit argument caps the output length.
func TestDedupeTopHopsLimit(t *testing.T) {
	// Generate 25 unique pairs, verify limit=20 caps output
	var hops []distHopRecord
	for i := 0; i < 25; i++ {
		hops = append(hops, distHopRecord{
			FromPk: "A", ToPk: string(rune('a' + i)),
			Dist: float64(i), Type: "R↔R", Hash: "h",
		})
	}
	if got := dedupeHopsByPair(hops, 20); len(got) != 20 {
		t.Errorf("expected 20 entries, got %d", len(got))
	}
}
// TestDedupeTopHopsEvenMedian: with an even number of SNR samples the median
// is the average of the two middle values.
func TestDedupeTopHopsEvenMedian(t *testing.T) {
	snrs := []float64{2.0, 4.0, 6.0, 8.0}
	dists := []float64{10, 20, 30, 40}
	hashes := []string{"h1", "h2", "h3", "h4"}
	hops := make([]distHopRecord, 0, len(snrs))
	for i := range snrs {
		hops = append(hops, distHopRecord{
			FromPk: "A", ToPk: "B", Dist: dists[i], Type: "R↔R",
			SNR: f64(snrs[i]), Hash: hashes[i],
		})
	}
	got := dedupeHopsByPair(hops, 20)
	// sorted SNR: [2,4,6,8], median = (4+6)/2 = 5.0
	if got[0]["medianSnr"].(float64) != 5.0 {
		t.Errorf("expected medianSnr 5.0, got %v", got[0]["medianSnr"])
	}
}
+4 -10
View File
@@ -42,20 +42,14 @@
"type": {
"type": "string"
},
"snr": {
"type": "number"
},
"hash": {
"type": "string"
},
"timestamp": {
"type": "string"
},
"bestSnr": {
"type": "number"
},
"medianSnr": {
"type": "number"
},
"obsCount": {
"type": "number"
}
}
}
@@ -1586,4 +1580,4 @@
}
}
}
}
}
+21 -10
View File
@@ -69,11 +69,13 @@ func TestTouchRelayLastSeen_Debouncing(t *testing.T) {
lastSeenTouched: make(map[string]time.Time),
}
// After #800, touchRelayLastSeen takes a []string of pubkeys (from decode-window)
pks := []string{"relay1"}
pk := "relay1"
tx := &StoreTx{
ResolvedPath: []*string{&pk},
}
now := time.Now()
s.touchRelayLastSeen(pks, now)
s.touchRelayLastSeen(tx, now)
// Verify it was written
var lastSeen sql.NullString
@@ -86,7 +88,7 @@ func TestTouchRelayLastSeen_Debouncing(t *testing.T) {
db.conn.Exec("UPDATE nodes SET last_seen = NULL WHERE public_key = ?", "relay1")
// Call again within 5 minutes — should be debounced (no write)
s.touchRelayLastSeen(pks, now.Add(2*time.Minute))
s.touchRelayLastSeen(tx, now.Add(2*time.Minute))
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "relay1").Scan(&lastSeen)
if lastSeen.Valid {
@@ -94,14 +96,14 @@ func TestTouchRelayLastSeen_Debouncing(t *testing.T) {
}
// Call after 5 minutes — should write again
s.touchRelayLastSeen(pks, now.Add(6*time.Minute))
s.touchRelayLastSeen(tx, now.Add(6*time.Minute))
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "relay1").Scan(&lastSeen)
if !lastSeen.Valid {
t.Fatal("expected write after debounce interval expired")
}
}
func TestTouchRelayLastSeen_SkipsEmptyPubkeys(t *testing.T) {
func TestTouchRelayLastSeen_SkipsNilResolvedPath(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
@@ -110,9 +112,13 @@ func TestTouchRelayLastSeen_SkipsEmptyPubkeys(t *testing.T) {
lastSeenTouched: make(map[string]time.Time),
}
// Empty pubkeys — should not panic or error
s.touchRelayLastSeen([]string{}, time.Now())
s.touchRelayLastSeen(nil, time.Now())
// tx with nil entries and empty resolved_path
tx := &StoreTx{
ResolvedPath: []*string{nil, nil},
}
// Should not panic or error
s.touchRelayLastSeen(tx, time.Now())
}
func TestTouchRelayLastSeen_NilDB(t *testing.T) {
@@ -121,6 +127,11 @@ func TestTouchRelayLastSeen_NilDB(t *testing.T) {
lastSeenTouched: make(map[string]time.Time),
}
pk := "abc"
tx := &StoreTx{
ResolvedPath: []*string{&pk},
}
// Should not panic with nil db
s.touchRelayLastSeen([]string{"abc"}, time.Now())
s.touchRelayLastSeen(tx, time.Now())
}
+26 -22
View File
@@ -28,7 +28,7 @@ func TestEstimateStoreTxBytes_ReasonableValues(t *testing.T) {
}
// TestEstimateStoreTxBytes_ManyHopsSubpaths verifies that packets with many
// hops estimate significantly more due to O(path²) subpath index entries.
// hops estimate more due to per-hop byPathHop index entries.
func TestEstimateStoreTxBytes_ManyHopsSubpaths(t *testing.T) {
tx2 := &StoreTx{
Hash: "aabb",
@@ -43,35 +43,37 @@ func TestEstimateStoreTxBytes_ManyHopsSubpaths(t *testing.T) {
est2 := estimateStoreTxBytes(tx2)
est10 := estimateStoreTxBytes(tx10)
// 10 hops → 45 subpath combos × 40 = 1800 bytes just for subpaths
// 10 hops vs 2 hops → 8 extra byPathHop entries × perPathHopBytes
if est10 <= est2 {
t.Errorf("10-hop (%d) should estimate more than 2-hop (%d)", est10, est2)
}
if est10 < est2+1500 {
t.Errorf("10-hop (%d) should estimate at least 1500 more than 2-hop (%d)", est10, est2)
// spTxIndex eliminated in #791; cost difference is now linear (per-hop only)
expectedDiff := int64(8) * perPathHopBytes // 8 extra hops
if est10 < est2+expectedDiff {
t.Errorf("10-hop (%d) should estimate at least %d more than 2-hop (%d)", est10, expectedDiff, est2)
}
}
// TestEstimateStoreObsBytes_AfterRefactor verifies that after #800 refactor,
// observations no longer have ResolvedPath overhead in their estimate.
func TestEstimateStoreObsBytes_AfterRefactor(t *testing.T) {
obs := &StoreObs{
// TestEstimateStoreObsBytes_WithResolvedPath verifies that observations with
// ResolvedPath estimate more than those without.
func TestEstimateStoreObsBytes_WithResolvedPath(t *testing.T) {
s1, s2, s3 := "node1", "node2", "node3"
obsNoRP := &StoreObs{
ObserverID: "obs1",
PathJSON: `["a","b"]`,
}
obsWithRP := &StoreObs{
ObserverID: "obs1",
PathJSON: `["a","b"]`,
ResolvedPath: []*string{&s1, &s2, &s3},
}
est := estimateStoreObsBytes(obs)
if est <= 0 {
t.Errorf("estimate should be positive, got %d", est)
}
// After #800, all obs estimates should be the same (no RP field variation)
obs2 := &StoreObs{
ObserverID: "obs1",
PathJSON: `["a","b"]`,
}
est2 := estimateStoreObsBytes(obs2)
if est != est2 {
t.Errorf("estimates should be equal after #800 (no RP field), got %d vs %d", est, est2)
estNo := estimateStoreObsBytes(obsNoRP)
estWith := estimateStoreObsBytes(obsWithRP)
if estWith <= estNo {
t.Errorf("obs with ResolvedPath (%d) should estimate more than without (%d)", estWith, estNo)
}
}
@@ -155,9 +157,11 @@ func BenchmarkEstimateStoreTxBytes(b *testing.B) {
// BenchmarkEstimateStoreObsBytes verifies the obs estimate function is fast.
func BenchmarkEstimateStoreObsBytes(b *testing.B) {
s := "resolvedNodePubkey123456"
obs := &StoreObs{
ObserverID: "observer1234",
PathJSON: `["a","b","c"]`,
ObserverID: "observer1234",
PathJSON: `["a","b","c"]`,
ResolvedPath: []*string{&s, &s, &s},
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
+4 -22
View File
@@ -72,22 +72,6 @@ type StatsResponse struct {
BackfillProgress float64 `json:"backfillProgress"`
SignatureDrops int64 `json:"signatureDrops,omitempty"`
HashMigrationComplete bool `json:"hashMigrationComplete"`
// Memory accounting (issue #832). All values in MB.
//
// StoreDataMB ("trackedMB" historically) is the in-store packet byte
// estimate — useful packet bytes only. Subset of HeapInuse. Used as
// the eviction watermark input. NOT a proxy for RSS; ops dashboards
// should prefer ProcessRSSMB for capacity decisions.
//
// Old field name TrackedMB is retained for backward compatibility
// with pre-v3.6 consumers; it carries the same value as StoreDataMB
// and is deprecated.
TrackedMB float64 `json:"trackedMB"` // deprecated alias for storeDataMB
StoreDataMB float64 `json:"storeDataMB"` // in-store packet bytes (subset of heap)
ProcessRSSMB float64 `json:"processRSSMB"` // process RSS from /proc (Linux) or runtime.Sys fallback
GoHeapInuseMB float64 `json:"goHeapInuseMB"` // runtime.MemStats.HeapInuse
GoSysMB float64 `json:"goSysMB"` // runtime.MemStats.Sys (total Go-managed)
}
// ─── Health ────────────────────────────────────────────────────────────────────
@@ -263,6 +247,7 @@ type TransmissionResp struct {
SNR interface{} `json:"snr"`
RSSI interface{} `json:"rssi"`
PathJSON interface{} `json:"path_json"`
ResolvedPath []*string `json:"resolved_path,omitempty"`
Direction interface{} `json:"direction"`
Score interface{} `json:"score,omitempty"`
Observations []ObservationResp `json:"observations,omitempty"`
@@ -277,9 +262,7 @@ type ObservationResp struct {
SNR interface{} `json:"snr"`
RSSI interface{} `json:"rssi"`
PathJSON interface{} `json:"path_json"`
ResolvedPath interface{} `json:"resolved_path,omitempty"`
Direction interface{} `json:"direction,omitempty"`
RawHex interface{} `json:"raw_hex,omitempty"`
ResolvedPath []*string `json:"resolved_path,omitempty"`
Timestamp interface{} `json:"timestamp"`
}
@@ -315,6 +298,7 @@ type PacketTimestampsResponse struct {
type PacketDetailResponse struct {
Packet interface{} `json:"packet"`
Path []interface{} `json:"path"`
Breakdown *Breakdown `json:"breakdown"`
ObservationCount int `json:"observation_count"`
Observations []ObservationResp `json:"observations,omitempty"`
}
@@ -680,9 +664,7 @@ type DistanceHop struct {
ToPk string `json:"toPk"`
Dist float64 `json:"dist"`
Type string `json:"type"`
BestSnr interface{} `json:"bestSnr"`
MedianSnr interface{} `json:"medianSnr"`
ObsCount int `json:"obsCount"`
SNR interface{} `json:"snr"`
Hash string `json:"hash"`
Timestamp string `json:"timestamp"`
}
-3
View File
@@ -1,3 +0,0 @@
module github.com/meshcore-analyzer/packetpath
go 1.22
-76
View File
@@ -1,76 +0,0 @@
// Package packetpath provides shared helpers for extracting path hops from
// raw MeshCore packet hex bytes.
package packetpath
import (
"encoding/hex"
"fmt"
"strings"
)
// DecodePathFromRawHex extracts the header path hops directly from raw hex bytes.
// This is the authoritative path that matches what's in raw_hex, as opposed to
// decoded.Path.Hops which may be overwritten for TRACE packets (issue #886).
//
// WARNING: the literal header path bytes are returned regardless of payload
// type. For TRACE packets these bytes are SNR values, NOT hop hashes. Callers
// that may receive TRACE packets MUST check PathBytesAreHops(payloadType)
// first, or use the safer DecodeHopsForPayload wrapper.
func DecodePathFromRawHex(rawHex string) ([]string, error) {
	buf, err := hex.DecodeString(rawHex)
	if err != nil || len(buf) < 2 {
		return nil, fmt.Errorf("invalid or too-short hex")
	}
	// Byte 0 is the header; its low two bits carry the route type.
	pos := 1
	if IsTransportRoute(int(buf[0] & 0x03)) {
		// Transport routes carry 4 bytes of transport codes before the path.
		if len(buf) < pos+4 {
			return nil, fmt.Errorf("too short for transport codes")
		}
		pos += 4
	}
	if pos >= len(buf) {
		return nil, fmt.Errorf("too short for path byte")
	}
	// Path byte: top two bits encode hash size (1-4 bytes), low six the count.
	pathByte := buf[pos]
	pos++
	size := int(pathByte>>6) + 1
	count := int(pathByte & 0x3F)
	hops := make([]string, 0, count)
	for n := 0; n < count; n++ {
		lo := pos + n*size
		hi := lo + size
		if hi > len(buf) {
			// Truncated packet: return the hops we could fully read.
			break
		}
		hops = append(hops, strings.ToUpper(hex.EncodeToString(buf[lo:hi])))
	}
	return hops, nil
}
// DecodeHopsForPayload returns the header path hops only when the payload
// type's header bytes actually contain route hops (PathBytesAreHops is true).
// For TRACE packets it returns (nil, ErrPayloadHasNoHeaderHops) so the caller
// is forced to source hops from the decoded payload instead.
//
// Prefer this over DecodePathFromRawHex when the payload type is known.
func DecodeHopsForPayload(rawHex string, payloadType byte) ([]string, error) {
	if PathBytesAreHops(payloadType) {
		return DecodePathFromRawHex(rawHex)
	}
	return nil, ErrPayloadHasNoHeaderHops
}
// errPayloadHasNoHeaderHops is the concrete sentinel type behind
// ErrPayloadHasNoHeaderHops; comparable, so callers can use == on the sentinel.
type errPayloadHasNoHeaderHops struct{}

func (errPayloadHasNoHeaderHops) Error() string {
	return "payload type repurposes header path bytes; source hops from decoded payload"
}

// ErrPayloadHasNoHeaderHops is returned by DecodeHopsForPayload when the
// payload type repurposes the raw_hex header path bytes (e.g. TRACE → SNR values).
var ErrPayloadHasNoHeaderHops = errPayloadHasNoHeaderHops{}
-150
View File
@@ -1,150 +0,0 @@
package packetpath
import (
"encoding/hex"
"encoding/json"
"strings"
"testing"
)
// TestDecodePathFromRawHex_Basic decodes a minimal FLOOD packet with two
// 1-byte hops and expects them back in order, upper-cased.
func TestDecodePathFromRawHex_Basic(t *testing.T) {
	// header: route_type=1, payload_type=2 (TXT_MSG), version=0 → 0b00_0010_01 = 0x09
	// path byte: hashSize=1 (bits 7-6 = 0), hashCount=2 (bits 5-0 = 2) → 0x02
	// hops AB, CD; trailing bytes are payload
	const raw = "0902ABCD" + "DEADBEEF"
	hops, err := DecodePathFromRawHex(raw)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(hops) != 2 || hops[0] != "AB" || hops[1] != "CD" {
		t.Fatalf("expected [AB, CD], got %v", hops)
	}
}
// TestDecodePathFromRawHex_ZeroHops: a DIRECT packet with hashCount=0 decodes
// to an empty hop list without error.
func TestDecodePathFromRawHex_ZeroHops(t *testing.T) {
	// DIRECT route (type=2), no hops → 0b00_0010_10 = 0x0A; path byte 0x00
	const raw = "0A00" + "DEADBEEF"
	hops, err := DecodePathFromRawHex(raw)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(hops) != 0 {
		t.Fatalf("expected 0 hops, got %v", hops)
	}
}
// TestDecodePathFromRawHex_TransportRoute verifies the 4 transport-code bytes
// are skipped before the path byte on transport routes.
func TestDecodePathFromRawHex_TransportRoute(t *testing.T) {
	// TRANSPORT_FLOOD (route_type=0), payload_type=5 (GRP_TXT), version=0
	// → header 0x14, then 4 transport-code bytes, path byte 0x01 (one 1-byte
	// hop), hop FF, then payload.
	const raw = "14" + "00112233" + "01" + "FF" + "DEAD"
	hops, err := DecodePathFromRawHex(raw)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(hops) != 1 || hops[0] != "FF" {
		t.Fatalf("expected [FF], got %v", hops)
	}
}
// buildTracePacket creates a TRACE packet hex string where header path bytes are
// SNR values, and payload contains the actual route hops.
//
// Returns the uppercase hex string, the header path bytes as hex strings
// (SNR values — NOT route hops), and the payload route hops.
func buildTracePacket() (rawHex string, headerPathHops []string, payloadHops []string) {
	// DIRECT route (type=2), TRACE payload (type=9), version=0
	// header byte: 0b00_1001_10 = 0x26
	headerByte := byte(0x26)
	// Header path: 2 SNR bytes (hashSize=1, hashCount=2) → path byte = 0x02
	// SNR values: 0x1A (26 dB), 0x0F (15 dB)
	pathByte := byte(0x02)
	snrBytes := []byte{0x1A, 0x0F}
	// TRACE payload: tag(4) + authCode(4) + flags(1) + path hops
	tag := []byte{0x01, 0x00, 0x00, 0x00}
	authCode := []byte{0x02, 0x00, 0x00, 0x00}
	// flags: path_sz=0 (1 byte hops), other bits=0 → 0x00
	flags := byte(0x00)
	// Payload hops: AA, BB, CC (the actual route)
	payloadPathBytes := []byte{0xAA, 0xBB, 0xCC}
	// Assemble in wire order: header, path byte, SNRs, then TRACE payload.
	var buf []byte
	buf = append(buf, headerByte, pathByte)
	buf = append(buf, snrBytes...)
	buf = append(buf, tag...)
	buf = append(buf, authCode...)
	buf = append(buf, flags)
	buf = append(buf, payloadPathBytes...)
	rawHex = strings.ToUpper(hex.EncodeToString(buf))
	headerPathHops = []string{"1A", "0F"} // SNR values — NOT route hops
	payloadHops = []string{"AA", "BB", "CC"} // actual route hops from payload
	return
}
// TestDecodePathFromRawHex_TraceReturnsSNR documents that DecodePathFromRawHex
// blindly returns the header path bytes — for TRACE packets those are SNR values.
func TestDecodePathFromRawHex_TraceReturnsSNR(t *testing.T) {
	rawHex, wantSNR, _ := buildTracePacket()
	hops, err := DecodePathFromRawHex(rawHex)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// DecodePathFromRawHex always returns header path bytes — for TRACE these are SNR values
	if len(hops) != len(wantSNR) {
		t.Fatalf("expected %d hops (SNR), got %d: %v", len(wantSNR), len(hops), hops)
	}
	for i := range hops {
		if hops[i] != wantSNR[i] {
			t.Errorf("hop[%d]: expected %s, got %s", i, wantSNR[i], hops[i])
		}
	}
}
// TestTracePathJSON_UsesPayloadHops validates the TRACE vs non-TRACE logic
// that callers should implement:
//   - TRACE:     path_json = decoded.Path.Hops (payload-decoded route hops)
//   - non-TRACE: path_json = DecodePathFromRawHex(raw_hex)
func TestTracePathJSON_UsesPayloadHops(t *testing.T) {
	rawHex, snrHops, routeHops := buildTracePacket()
	// For TRACE, DecodePathFromRawHex yields the header SNR bytes.
	fromHeader, _ := DecodePathFromRawHex(rawHex)
	headerJSON, _ := json.Marshal(fromHeader)
	// What decoded.Path.Hops would return after TRACE payload decoding.
	payloadJSON, _ := json.Marshal(routeHops)
	// The two encodings must differ — SNR bytes are not route hops.
	if string(headerJSON) == string(payloadJSON) {
		t.Fatalf("SNR hops and payload hops should differ for TRACE; both are %s", headerJSON)
	}
	// For TRACE, path_json should be the payload hops, never the header hops.
	_ = snrHops // snrHops == fromHeader — kept for documentation
	t.Logf("TRACE: header path (SNR) = %s, payload path (route) = %s", headerJSON, payloadJSON)
}
// TestDecodeHopsForPayload_NonTrace checks that for ordinary payload types the
// header path bytes are returned directly as hops.
func TestDecodeHopsForPayload_NonTrace(t *testing.T) {
	// header 0x01, path_len 0x02, hops 0xAA 0xBB, then payload bytes
	packet := "0102AABB00"
	got, err := DecodeHopsForPayload(packet, 0x05) // GRP_TXT — header path bytes ARE hops
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(got) != 2 || got[0] != "AA" || got[1] != "BB" {
		t.Errorf("expected [AA BB], got %v", got)
	}
}
// TestDecodeHopsForPayload_TraceReturnsError ensures TRACE packets are
// rejected with the sentinel error and no hops, since their header path bytes
// carry SNR values rather than route hops.
func TestDecodeHopsForPayload_TraceReturnsError(t *testing.T) {
	packet := "010205F00100"
	got, err := DecodeHopsForPayload(packet, PayloadTRACE)
	if err != ErrPayloadHasNoHeaderHops {
		t.Errorf("expected ErrPayloadHasNoHeaderHops, got %v", err)
	}
	if got != nil {
		t.Errorf("expected nil hops for TRACE, got %v", got)
	}
}
-24
View File
@@ -1,24 +0,0 @@
package packetpath
// Route type constants (header bits 1-0).
const (
RouteTransportFlood = 0
RouteFlood = 1
RouteDirect = 2
RouteTransportDirect = 3
)
// PayloadTRACE is the payload type constant for TRACE packets.
const PayloadTRACE = 0x09
// IsTransportRoute returns true for TRANSPORT_FLOOD (0) and TRANSPORT_DIRECT (3).
func IsTransportRoute(routeType int) bool {
return routeType == RouteTransportFlood || routeType == RouteTransportDirect
}
// PathBytesAreHops returns true when the raw_hex header path bytes represent
// route hop hashes (the normal case). Returns false for packet types where
// header path bytes are repurposed (e.g. TRACE uses them for SNR values).
func PathBytesAreHops(payloadType byte) bool {
return payloadType != PayloadTRACE
}
-31
View File
@@ -1,31 +0,0 @@
package packetpath

import "testing"

// TestIsTransportRoute covers all four route types against their expected
// transport classification.
func TestIsTransportRoute(t *testing.T) {
	cases := []struct {
		routeType int
		want      bool
		msg       string
	}{
		{RouteTransportFlood, true, "RouteTransportFlood should be transport"},
		{RouteTransportDirect, true, "RouteTransportDirect should be transport"},
		{RouteFlood, false, "RouteFlood should not be transport"},
		{RouteDirect, false, "RouteDirect should not be transport"},
	}
	for _, c := range cases {
		if IsTransportRoute(c.routeType) != c.want {
			t.Error(c.msg)
		}
	}
}

// TestPathBytesAreHops verifies TRACE is the sole payload type whose header
// path bytes are not route hops.
func TestPathBytesAreHops(t *testing.T) {
	if PathBytesAreHops(PayloadTRACE) {
		t.Error("PathBytesAreHops(PayloadTRACE) should be false")
	}
	// All other known payload types should return true.
	for pt := byte(0x00); pt <= 0x0F; pt++ {
		if pt == PayloadTRACE {
			continue
		}
		if !PathBytesAreHops(pt) {
			t.Errorf("PathBytesAreHops(0x%02X) should be true", pt)
		}
	}
}
+18 -56
View File
@@ -28,7 +28,7 @@
function barChart(data, labels, colors, w = 800, h = 220, pad = 40) {
const max = Math.max(...data, 1);
const barW = Math.max(1, Math.min((w - pad * 2) / data.length - 2, 30));
const barW = Math.min((w - pad * 2) / data.length - 2, 30);
let svg = `<svg viewBox="0 0 ${w} ${h}" style="width:100%;max-height:${h}px" role="img" aria-label="Bar chart showing data distribution"><title>Bar chart showing data distribution</title>`;
// Grid
for (let i = 0; i <= 4; i++) {
@@ -263,25 +263,7 @@
<div class="analytics-row">
<div class="analytics-card flex-1">
<h3>📈 Packets / Hour</h3>
${(() => {
const pph = rf.packetsPerHour;
const counts = pph.map(h => h.count);
// Decimate x-axis labels to avoid overlap
const totalHours = pph.length;
// Pick label interval: <=24h show every 6h, <=72h every 12h, else every 24h
const labelInterval = totalHours <= 24 ? 6 : totalHours <= 72 ? 12 : 24;
const labels = pph.map((h, i) => {
const hh = h.hour.slice(11, 13); // "HH"
const hourNum = parseInt(hh, 10);
if (hourNum % labelInterval === 0) {
// For multi-day ranges, show date on 00h boundaries
if (totalHours > 48 && hourNum === 0) return h.hour.slice(5, 10);
return hh + 'h';
}
return ''; // skip label
});
return barChart(counts, labels, 'var(--accent)');
})()}
${barChart(rf.packetsPerHour.map(h=>h.count), rf.packetsPerHour.map(h=>h.hour.slice(11)+'h'), 'var(--accent)')}
</div>
</div>
@@ -642,13 +624,14 @@
if (!data || !data.rings.length) return '<div class="text-muted">No path data for this observer</div>';
let html = `<div class="reach-rings">`;
data.rings.forEach(ring => {
const opacity = Math.max(0.3, 1 - ring.hops * 0.06);
const nodeLinks = ring.nodes.slice(0, 8).map(n => {
const label = n.name ? `<a href="#/nodes/${encodeURIComponent(n.pubkey)}" class="analytics-link">${esc(n.name)}</a>` : `<span class="mono">${n.hop}</span>`;
const detail = n.distRange ? ` <span class="text-muted">(${n.distRange})</span>` : '';
return label + detail;
}).join(', ');
const extra = ring.nodes.length > 8 ? ` <span class="text-muted">+${ring.nodes.length - 8} more</span>` : '';
html += `<div class="reach-ring">
html += `<div class="reach-ring" style="opacity:${opacity}">
<div class="reach-hop">${ring.hops} hop${ring.hops > 1 ? 's' : ''}</div>
<div class="reach-nodes">${nodeLinks}${extra}</div>
<div class="reach-count">${ring.nodes.length} node${ring.nodes.length > 1 ? 's' : ''}</div>
@@ -692,6 +675,7 @@
});
let html = '<div class="reach-rings">';
Object.entries(byDist).sort((a, b) => +a[0] - +b[0]).forEach(([dist, nodes]) => {
const opacity = Math.max(0.3, 1 - (+dist) * 0.06);
const nodeLinks = nodes.slice(0, 10).map(n => {
const label = n.name
? `<a href="#/nodes/${encodeURIComponent(n.pubkey)}" class="analytics-link">${esc(n.name)}</a>`
@@ -699,7 +683,7 @@
return label + ` <span class="text-muted">via ${esc(n.observer_name)}</span>`;
}).join(', ');
const extra = nodes.length > 10 ? ` <span class="text-muted">+${nodes.length - 10} more</span>` : '';
html += `<div class="reach-ring">
html += `<div class="reach-ring" style="opacity:${opacity}">
<div class="reach-hop">${dist} hop${+dist > 1 ? 's' : ''}</div>
<div class="reach-nodes">${nodeLinks}${extra}</div>
<div class="reach-count">${nodes.length} node${nodes.length > 1 ? 's' : ''}</div>
@@ -856,44 +840,29 @@
}
}
var CHANNEL_TIMELINE_MAX_SERIES = 8;
function renderChannelTimeline(data) {
if (!data.length) return '<div class="text-muted">No data</div>';
var hours = []; var hourSet = {};
var channelList = []; var channelSet = {};
var lookup = {};
var channelVolume = {};
var maxCount = 1;
for (var i = 0; i < data.length; i++) {
var d = data[i];
if (!hourSet[d.hour]) { hourSet[d.hour] = 1; hours.push(d.hour); }
if (!channelSet[d.channel]) { channelSet[d.channel] = 1; channelList.push(d.channel); }
lookup[d.hour + '|' + d.channel] = d.count;
channelVolume[d.channel] = (channelVolume[d.channel] || 0) + d.count;
if (d.count > maxCount) maxCount = d.count;
}
hours.sort();
// Sort channels by total volume descending, cap to top N
channelList.sort(function(a, b) { return channelVolume[b] - channelVolume[a]; });
var hiddenCount = Math.max(0, channelList.length - CHANNEL_TIMELINE_MAX_SERIES);
var visibleChannels = channelList.slice(0, CHANNEL_TIMELINE_MAX_SERIES);
var maxCount = 1;
for (var vi = 0; vi < visibleChannels.length; vi++) {
for (var hi2 = 0; hi2 < hours.length; hi2++) {
var c = lookup[hours[hi2] + '|' + visibleChannels[vi]] || 0;
if (c > maxCount) maxCount = c;
}
}
var colors = ['#ef4444','#22c55e','#3b82f6','#f59e0b','#8b5cf6','#ec4899','#14b8a6','#64748b'];
var w = 600, h = 180, pad = 35;
var xScale = (w - pad * 2) / Math.max(hours.length - 1, 1);
var yScale = (h - pad * 2) / maxCount;
var svg = '<svg viewBox="0 0 ' + w + ' ' + h + '" style="width:100%;max-height:180px" role="img" aria-label="Channel message activity over time"><title>Channel message activity over time</title>';
for (var ci = 0; ci < visibleChannels.length; ci++) {
for (var ci = 0; ci < channelList.length; ci++) {
var pts = [];
for (var hi = 0; hi < hours.length; hi++) {
var count = lookup[hours[hi] + '|' + visibleChannels[ci]] || 0;
var count = lookup[hours[hi] + '|' + channelList[ci]] || 0;
var x = pad + hi * xScale;
var y = h - pad - count * yScale;
pts.push(x + ',' + y);
@@ -907,11 +876,8 @@
}
svg += '</svg>';
var legendParts = [];
for (var lci = 0; lci < visibleChannels.length; lci++) {
legendParts.push('<span><span class="legend-dot" style="background:' + colors[lci % colors.length] + '"></span>' + esc(visibleChannels[lci]) + '</span>');
}
if (hiddenCount > 0) {
legendParts.push('<span class="text-muted">+' + hiddenCount + ' more</span>');
for (var lci = 0; lci < channelList.length; lci++) {
legendParts.push('<span><span class="legend-dot" style="background:' + colors[lci % colors.length] + '"></span>' + esc(channelList[lci]) + '</span>');
}
svg += '<div class="timeline-legend">' + legendParts.join('') + '</div>';
return svg;
@@ -1971,18 +1937,15 @@
}
// Top hops leaderboard
html += `<div class="analytics-section"><h3>🏆 Top 20 Longest Hops</h3><table class="data-table"><thead><tr><th scope="col">#</th><th scope="col">From</th><th scope="col">To</th><th scope="col">Distance (${distUnitLabel})</th><th scope="col">Type</th><th scope="col">Obs</th><th scope="col">Best SNR</th><th scope="col">Median SNR</th><th scope="col">Packet</th><th scope="col"></th></tr></thead><tbody>`;
html += `<div class="analytics-section"><h3>🏆 Top 20 Longest Hops</h3><table class="data-table"><thead><tr><th scope="col">#</th><th scope="col">From</th><th scope="col">To</th><th scope="col">Distance (${distUnitLabel})</th><th scope="col">Type</th><th scope="col">SNR</th><th scope="col">Packet</th><th scope="col"></th></tr></thead><tbody>`;
const top20 = data.topHops.slice(0, 20);
top20.forEach((h, i) => {
const fromLink = h.fromPk ? `<a href="#/nodes/${encodeURIComponent(h.fromPk)}" class="analytics-link">${esc(h.fromName)}</a>` : esc(h.fromName || '?');
const toLink = h.toPk ? `<a href="#/nodes/${encodeURIComponent(h.toPk)}" class="analytics-link">${esc(h.toName)}</a>` : esc(h.toName || '?');
const bestSnr = h.bestSnr != null ? Number(h.bestSnr).toFixed(1) + ' dB' : '<span class="text-muted">—</span>';
const medianSnr = h.medianSnr != null ? Number(h.medianSnr).toFixed(1) + ' dB' : '<span class="text-muted">—</span>';
const obs = h.obsCount != null ? h.obsCount : 1;
const snr = h.snr != null ? h.snr + ' dB' : '<span class="text-muted">—</span>';
const pktLink = h.hash ? `<a href="#/packet/${encodeURIComponent(h.hash)}" class="analytics-link mono" style="font-size:0.85em">${esc(h.hash.slice(0, 12))}…</a>` : '—';
const mapBtn = h.fromPk && h.toPk ? `<button class="btn-icon dist-map-hop" data-from="${esc(h.fromPk)}" data-to="${esc(h.toPk)}" title="View on map">🗺️</button>` : '';
const tsTitle = h.timestamp ? `Best observation: ${h.timestamp}` : '';
html += `<tr title="${esc(tsTitle)}"><td>${i+1}</td><td>${fromLink}</td><td>${toLink}</td><td><strong>${formatDistance(h.dist)}</strong></td><td>${esc(h.type)}</td><td>${obs}</td><td>${bestSnr}</td><td>${medianSnr}</td><td>${pktLink}</td><td>${mapBtn}</td></tr>`;
html += `<tr><td>${i+1}</td><td>${fromLink}</td><td>${toLink}</td><td><strong>${formatDistance(h.dist)}</strong></td><td>${esc(h.type)}</td><td>${snr}</td><td>${pktLink}</td><td>${mapBtn}</td></tr>`;
});
html += `</tbody></table></div>`;
@@ -3485,7 +3448,7 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
if (sortKey === 'severity') {
v = (SKEW_SEVERITY_ORDER[a.severity] || 9) - (SKEW_SEVERITY_ORDER[b.severity] || 9);
} else if (sortKey === 'skew') {
v = Math.abs(window.currentSkewValue(b) || 0) - Math.abs(window.currentSkewValue(a) || 0);
v = Math.abs(b.medianSkewSec || 0) - Math.abs(a.medianSkewSec || 0);
} else if (sortKey === 'name') {
v = (a.nodeName || '').localeCompare(b.nodeName || '');
} else if (sortKey === 'drift') {
@@ -3512,13 +3475,12 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
var rowsHtml = filtered.map(function(n) {
var rowClass = 'clock-fleet-row--' + (n.severity || 'ok');
var lastAdv = n.lastObservedTS ? new Date(n.lastObservedTS * 1000).toISOString().replace('T', ' ').replace(/\.\d+Z/, ' UTC') : '—';
var skewVal = window.currentSkewValue(n);
var skewText = n.severity === 'no_clock' ? 'No Clock' : formatSkew(skewVal);
var skewText = n.severity === 'no_clock' ? 'No Clock' : formatSkew(n.medianSkewSec);
var driftText = n.severity === 'no_clock' || !n.driftPerDaySec ? '' : formatDrift(n.driftPerDaySec);
return '<tr class="' + rowClass + '" data-pubkey="' + esc(n.pubkey) + '" style="cursor:pointer">' +
'<td><strong>' + esc(n.nodeName || n.pubkey.slice(0, 12)) + '</strong></td>' +
'<td style="font-family:var(--mono,monospace)">' + skewText + '</td>' +
'<td>' + renderSkewBadge(n.severity, skewVal, n) + '</td>' +
'<td>' + renderSkewBadge(n.severity, n.medianSkewSec) + '</td>' +
'<td style="font-family:var(--mono,monospace)">' + driftText + '</td>' +
'<td style="font-size:11px">' + lastAdv + '</td>' +
'</tr>';
+1 -67
View File
@@ -10,75 +10,8 @@ function routeTypeName(n) { return ROUTE_TYPES[n] || 'UNKNOWN'; }
function payloadTypeName(n) { return PAYLOAD_TYPES[n] || 'UNKNOWN'; }
function payloadTypeColor(n) { return PAYLOAD_COLORS[n] || 'unknown'; }
function isTransportRoute(rt) { return [0, 3].includes(rt); }
/** Byte offset of path_len in raw_hex: 5 for transport routes (4 bytes of next/last hop codes precede it), 1 otherwise. */
function getPathLenOffset(routeType) { return isTransportRoute(routeType) ? 5 : 1; }
function transportBadge(rt) { return isTransportRoute(rt) ? ` <span class="badge badge-transport" title="${routeTypeName(rt)}">T</span>` : ''; }
/**
 * Compute breakdown byte ranges from raw_hex on the client.
 * Mirrors cmd/server/decoder.go BuildBreakdown(). Used so per-observation raw_hex
 * (which can differ in path length from the top-level packet) gets accurate
 * highlighted byte ranges, instead of using the server-supplied breakdown
 * computed once from the top-level raw_hex.
 *
 * @param {string} hexString - packet raw hex (whitespace tolerated)
 * @param {number} routeType - header route type (0-3)
 * @param {number} payloadType - header payload type; 4 (ADVERT) gets sub-fields
 * @returns {Array<{start:number,end:number,label:string}>} inclusive byte ranges
 */
function computeBreakdownRanges(hexString, routeType, payloadType) {
  if (!hexString) return [];
  const hex = hexString.replace(/\s+/g, '');
  // NOTE(review): odd-length hex yields a fractional byte count; kept as-is
  // to match the server-side behavior this mirrors.
  const totalBytes = hex.length / 2;
  if (totalBytes < 2) return [];
  const byteAt = (pos) => parseInt(hex.slice(pos * 2, pos * 2 + 2), 16);
  const ranges = [{ start: 0, end: 0, label: 'Header' }];
  let pos = 1;
  if (isTransportRoute(routeType)) {
    if (totalBytes < pos + 4) return ranges;
    ranges.push({ start: pos, end: pos + 3, label: 'Transport Codes' });
    pos += 4;
  }
  if (pos >= totalBytes) return ranges;
  // Path Length byte: top 2 bits encode hash size - 1, low 6 bits the count.
  ranges.push({ start: pos, end: pos, label: 'Path Length' });
  const pathByte = byteAt(pos);
  pos += 1;
  if (isNaN(pathByte)) return ranges;
  const hashCount = pathByte & 0x3F;
  const pathBytes = ((pathByte >> 6) + 1) * hashCount;
  if (hashCount > 0 && pos + pathBytes <= totalBytes) {
    ranges.push({ start: pos, end: pos + pathBytes - 1, label: 'Path' });
  }
  pos += pathBytes;
  if (pos >= totalBytes) return ranges;
  const payloadStart = pos;
  // Non-ADVERT payloads (or truncated ADVERT records) are a single opaque range.
  if (payloadType !== 4 || totalBytes - payloadStart < 100) {
    ranges.push({ start: payloadStart, end: totalBytes - 1, label: 'Payload' });
    return ranges;
  }
  // ADVERT (payload_type 4) with a full record: fixed-width fields first.
  ranges.push({ start: payloadStart, end: payloadStart + 31, label: 'PubKey' });
  ranges.push({ start: payloadStart + 32, end: payloadStart + 35, label: 'Timestamp' });
  ranges.push({ start: payloadStart + 36, end: payloadStart + 99, label: 'Signature' });
  const appStart = payloadStart + 100;
  if (appStart >= totalBytes) return ranges;
  ranges.push({ start: appStart, end: appStart, label: 'Flags' });
  const appFlags = byteAt(appStart);
  if (isNaN(appFlags)) return ranges;
  // Optional app fields follow in flag order; offsets accumulate as each
  // present field is consumed.
  let fOff = appStart + 1;
  if ((appFlags & 0x10) && fOff + 8 <= totalBytes) {
    ranges.push({ start: fOff, end: fOff + 3, label: 'Latitude' });
    ranges.push({ start: fOff + 4, end: fOff + 7, label: 'Longitude' });
    fOff += 8;
  }
  if ((appFlags & 0x20) && fOff + 2 <= totalBytes) fOff += 2; // feature field 1
  if ((appFlags & 0x40) && fOff + 2 <= totalBytes) fOff += 2; // feature field 2
  if ((appFlags & 0x80) && fOff < totalBytes) {
    ranges.push({ start: fOff, end: totalBytes - 1, label: 'Name' });
  }
  return ranges;
}
// --- Utilities ---
const _apiPerf = { calls: 0, totalMs: 0, log: [], cacheHits: 0 };
const _apiCache = new Map();
@@ -1094,6 +1027,7 @@ function makeColumnsResizable(tableSelector, storageKey) {
// Add resize handles
ths.forEach((th, i) => {
if (i === ths.length - 1) return;
th.style.position = 'relative';
const handle = document.createElement('div');
handle.className = 'col-resize-handle';
handle.addEventListener('mousedown', (e) => {
+17 -78
View File
@@ -393,25 +393,17 @@
}
}
// Merge user-stored keys into the channel list.
// If a stored key matches a server-known channel, mark that channel as
// userAdded so the ✕ button appears — otherwise the user has no way to
// remove a key they added but that the server already knows about.
// Merge user-stored keys into the channel list
function mergeUserChannels() {
var keys = ChannelDecrypt.getStoredKeys();
var names = Object.keys(keys);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var matched = false;
for (var j = 0; j < channels.length; j++) {
var ch = channels[j];
if (ch.name === name || ch.hash === name || ch.hash === ('user:' + name)) {
ch.userAdded = true;
matched = true;
break;
}
}
if (!matched) {
// Check if channel already exists by name
var exists = channels.some(function (ch) {
return ch.name === name || ch.hash === name || ch.hash === ('user:' + name);
});
if (!exists) {
channels.push({
hash: 'user:' + name,
name: name,
@@ -757,38 +749,19 @@
e.stopPropagation();
var channelHash = removeBtn.getAttribute('data-remove-channel');
if (!channelHash) return;
// The localStorage key is the channel name. For user:-prefixed entries
// strip the prefix; for server-known channels look up the channel
// object so we use its display name (the hash itself isn't the key).
var ch = channels.find(function (c) { return c.hash === channelHash; });
var chName = channelHash.startsWith('user:')
? channelHash.substring(5)
: (ch && ch.name) || channelHash;
var chName = channelHash.startsWith('user:') ? channelHash.substring(5) : channelHash;
if (!confirm('Remove channel "' + chName + '"? This will clear saved keys and cached messages.')) return;
ChannelDecrypt.removeKey(chName);
if (channelHash.startsWith('user:')) {
// Pure user-added channel — drop from the list entirely.
channels = channels.filter(function (c) { return c.hash !== channelHash; });
if (selectedHash === channelHash) {
selectedHash = null;
messages = [];
history.replaceState(null, '', '#/channels');
var msgEl2 = document.getElementById('chMessages');
if (msgEl2) msgEl2.innerHTML = '<div class="ch-empty">Choose a channel from the sidebar to view messages</div>';
var header2 = document.getElementById('chHeader');
if (header2) header2.querySelector('.ch-header-text').textContent = 'Select a channel';
}
} else if (ch) {
// Server-known channel: keep the row, just unmark as user-added so
// the ✕ disappears until they re-add a key.
ch.userAdded = false;
// If this was the selected channel, clear decrypted messages since
// the key is gone — they can't be re-decrypted without re-adding it.
if (selectedHash === channelHash) {
messages = [];
var msgEl2 = document.getElementById('chMessages');
if (msgEl2) msgEl2.innerHTML = '<div class="ch-empty">Key removed — add a key to decrypt messages</div>';
}
// Remove from channels array
channels = channels.filter(function (c) { return c.hash !== channelHash; });
if (selectedHash === channelHash) {
selectedHash = null;
messages = [];
history.replaceState(null, '', '#/channels');
var msgEl2 = document.getElementById('chMessages');
if (msgEl2) msgEl2.innerHTML = '<div class="ch-empty">Choose a channel from the sidebar to view messages</div>';
var header2 = document.getElementById('chHeader');
if (header2) header2.querySelector('.ch-header-text').textContent = 'Select a channel';
}
renderChannelList();
return;
@@ -1192,40 +1165,6 @@
return;
}
// #811: Deep link to a `#`-named channel that's not in the loaded list.
// If a stored key matches, decrypt. Otherwise we must distinguish an
// encrypted-no-key channel (show lock) from an unencrypted channel that
// simply isn't in the toggle-off list (#825 — must fall through to REST).
if (hash.charAt(0) === '#') {
if (storedKeys[hash]) {
var keyHex2 = storedKeys[hash];
var keyBytes2 = ChannelDecrypt.hexToBytes(keyHex2);
var hashByte2 = await ChannelDecrypt.computeChannelHash(keyBytes2);
await decryptAndRender(keyHex2, hashByte2, hash);
return;
}
// #825: confirm encrypted-ness via an encrypted-included channel list
// before assuming a lock state. Conservative on error — fall through.
// Show a loading affordance so cold deep links don't display stale content
// for the duration of the metadata RTT (cached 15s thereafter).
msgEl.innerHTML = '<div class="ch-loading">Loading messages…</div>';
try {
var rpInc = RegionFilter.getRegionParam();
var paramsInc = ['includeEncrypted=true'];
if (rpInc) paramsInc.push('region=' + encodeURIComponent(rpInc));
var allCh = await api('/channels?' + paramsInc.join('&'), { ttl: CLIENT_TTL.channels });
if (isStaleMessageRequest(request)) return;
var foundCh = (allCh.channels || []).find(function (c) { return c.hash === hash; });
if (foundCh && foundCh.encrypted === true) {
msgEl.innerHTML = '<div class="ch-empty">🔒 This channel is encrypted and no decryption key is configured</div>';
return;
}
// Unencrypted (or unknown) — fall through to the REST fetch below.
} catch (e) {
// ignore — fall through to REST fetch
}
}
msgEl.innerHTML = '<div class="ch-loading">Loading messages…</div>';
try {
+1 -5
View File
@@ -81,13 +81,9 @@ window.HopDisplay = (function() {
const regionalConflicts = conflicts.filter(c => c.regional);
const badgeCount = regionalConflicts.length > 0 ? regionalConflicts.length : (globalFallback ? conflicts.length : 0);
const conflictData = escapeHtml(JSON.stringify({ h, conflicts, globalFallback }));
const conflictBadge = badgeCount > 1
const warnBadge = badgeCount > 1
? ` <button class="hop-conflict-btn" data-conflict='${conflictData}' onclick="event.preventDefault();event.stopPropagation();HopDisplay._showFromBtn(this)" title="${badgeCount} candidates — click for details">⚠${badgeCount}</button>`
: '';
const unreliableBadge = unreliable
? ' <button class="hop-unreliable-btn" aria-label="Unreliable name resolution" title="Unreliable name resolution — this hash\u2192name match is geographically inconsistent with the surrounding path hops. The repeater itself may be fine; this specific hop assignment is uncertain.">⚠️</button>'
: '';
const warnBadge = conflictBadge + unreliableBadge;
const cls = [
'hop',
+68 -138
View File
@@ -7,14 +7,6 @@ window.HopResolver = (function() {
const MAX_HOP_DIST = 1.8; // ~200km in degrees
const REGION_RADIUS_KM = 300;
// Only repeaters and room servers can appear as path hops per protocol.
// Companions/sensors originate but never relay packets.
function canAppearInPath(role) {
if (!role) return false;
var r = String(role).toLowerCase();
return r.indexOf('repeater') >= 0 || r.indexOf('room_server') >= 0 || r === 'room';
}
let prefixIdx = {}; // lowercase hex prefix → [node, ...]
let pubkeyIdx = {}; // full lowercase pubkey → node (O(1) lookup)
let nodesList = [];
@@ -48,11 +40,7 @@ window.HopResolver = (function() {
for (const n of nodesList) {
if (!n.public_key) continue;
const pk = n.public_key.toLowerCase();
// pubkeyIdx includes ALL nodes — used by resolveFromServer for
// server-confirmed full-pubkey lookups (any node type).
pubkeyIdx[pk] = n;
// prefixIdx only includes nodes that can appear as path hops.
if (!canAppearInPath(n.role)) continue;
for (let len = 1; len <= 3; len++) {
const p = pk.slice(0, len * 2);
if (!prefixIdx[p]) prefixIdx[p] = [];
@@ -84,89 +72,33 @@ window.HopResolver = (function() {
}
/**
* Pick the best candidate by scoring against BOTH prev and next resolved hops.
*
* Strategy (in priority order):
* 1. Neighbor-graph edge weight: sum of edge scores to prevPubkey + nextPubkey. Pick max.
* 2. Geographic centroid: if no candidate has graph edges, compute centroid of
* prev+next positions and pick closest candidate by haversine distance.
* 3. Single-anchor geo fallback: if only one neighbor is resolved, use it as anchor.
* 4. Original heuristic: first candidate (when no context at all).
*
* Pick the best candidate using affinity first, then geo-distance fallback.
* @param {Array} candidates - candidates with lat/lon/pubkey/name
* @param {string|null} prevPubkey - pubkey of previous resolved hop
* @param {string|null} nextPubkey - pubkey of next resolved hop
* @param {Object|null} prevPos - {lat, lon} of previous resolved hop or origin
* @param {Object|null} nextPos - {lat, lon} of next resolved hop or observer
* @param {string|null} adjacentPubkey - pubkey of the previously/next resolved hop
* @param {Object|null} anchor - {lat, lon} for geo fallback
* @param {number|null} fallbackLat - fallback anchor lat (e.g. observer)
* @param {number|null} fallbackLon - fallback anchor lon
* @returns {Object} best candidate
*/
function pickByAffinity(candidates, prevPubkey, nextPubkey, prevPos, nextPos) {
const hasGraph = Object.keys(affinityMap).length > 0;
const hasAdj = prevPubkey || nextPubkey;
// Strategy 1: neighbor-graph edge weights (sum of prev + next)
if (hasGraph && hasAdj) {
const scored = candidates.map(function(c) {
let s = 0;
if (prevPubkey) s += getAffinity(prevPubkey, c.pubkey);
if (nextPubkey) s += getAffinity(nextPubkey, c.pubkey);
return { candidate: c, edgeScore: s };
});
const withEdges = scored.filter(function(s) { return s.edgeScore > 0; });
if (withEdges.length > 0) {
withEdges.sort(function(a, b) { return b.edgeScore - a.edgeScore; });
_traceMultiCandidate(candidates, scored, withEdges[0].candidate, 'graph');
return withEdges[0].candidate;
function pickByAffinity(candidates, adjacentPubkey, anchor, fallbackLat, fallbackLon) {
// If we have affinity data and an adjacent hop, prefer neighbors
if (adjacentPubkey && Object.keys(affinityMap).length > 0) {
const withAffinity = candidates
.map(c => ({ ...c, affinity: getAffinity(adjacentPubkey, c.pubkey) }))
.filter(c => c.affinity > 0);
if (withAffinity.length > 0) {
withAffinity.sort((a, b) => b.affinity - a.affinity);
return withAffinity[0];
}
}
// Strategy 2/3: geographic — centroid of prev+next, or single anchor
let anchorLat = null, anchorLon = null, anchorCount = 0;
if (prevPos && prevPos.lat != null && prevPos.lon != null) {
anchorLat = (anchorLat || 0) + prevPos.lat;
anchorLon = (anchorLon || 0) + prevPos.lon;
anchorCount++;
// Fallback: geo-distance sort (existing behavior)
const effectiveAnchor = anchor || (fallbackLat != null ? { lat: fallbackLat, lon: fallbackLon } : null);
if (effectiveAnchor) {
candidates.sort((a, b) => dist(a.lat, a.lon, effectiveAnchor.lat, effectiveAnchor.lon) - dist(b.lat, b.lon, effectiveAnchor.lat, effectiveAnchor.lon));
}
if (nextPos && nextPos.lat != null && nextPos.lon != null) {
anchorLat = (anchorLat || 0) + nextPos.lat;
anchorLon = (anchorLon || 0) + nextPos.lon;
anchorCount++;
}
if (anchorCount > 0) {
anchorLat /= anchorCount;
anchorLon /= anchorCount;
const geoScored = candidates.map(function(c) {
const d = (c.lat != null && c.lon != null && !(c.lat === 0 && c.lon === 0))
? haversineKm(c.lat, c.lon, anchorLat, anchorLon) : 999999;
return { candidate: c, distKm: d };
});
geoScored.sort(function(a, b) { return a.distKm - b.distKm; });
_traceMultiCandidate(candidates, geoScored, geoScored[0].candidate, 'centroid');
return geoScored[0].candidate;
}
// Strategy 4: no context — return first candidate
_traceMultiCandidate(candidates, null, candidates[0], 'fallback');
return candidates[0];
}
/** Dev-mode console trace for multi-candidate picks */
function _traceMultiCandidate(candidates, scored, chosen, method) {
if (typeof console === 'undefined' || !console.debug) return;
if (candidates.length < 2) return;
try {
const prefix = candidates[0].pubkey ? candidates[0].pubkey.slice(0, 2) : '??';
const scoreSummary = scored ? scored.map(function(s) {
const pk = (s.candidate || s).pubkey || '?';
const val = s.edgeScore != null ? s.edgeScore : (s.distKm != null ? s.distKm + 'km' : '?');
return pk.slice(0, 8) + ':' + val;
}) : [];
console.debug('[hop-resolver] hash=' + prefix + ' candidates=' + candidates.length +
' scored=[' + scoreSummary.join(',') + '] chose=' + (chosen.pubkey || '?').slice(0, 8) +
' method=' + method);
} catch(e) { /* trace is best-effort */ }
}
/**
* Resolve an array of hex hop prefixes to node info.
* Returns a map: { hop: {name, pubkey, lat, lon, ambiguous, unreliable} }
@@ -237,54 +169,52 @@ window.HopResolver = (function() {
}
}
// Combined disambiguation: resolve ambiguous hops using both neighbors.
// We iterate until no more hops can be resolved (handles cascading dependencies).
const originPos = (originLat != null && originLon != null) ? { lat: originLat, lon: originLon } : null;
const observerPos = (observerLat != null && observerLon != null) ? { lat: observerLat, lon: observerLon } : null;
let changed = true;
let maxIter = hops.length + 1; // prevent infinite loops
while (changed && maxIter-- > 0) {
changed = false;
for (let i = 0; i < hops.length; i++) {
const hop = hops[i];
if (hopPositions[hop]) continue; // already resolved
const r = resolved[hop];
if (!r || !r.ambiguous) continue;
const withLoc = r.candidates.filter(c => c.lat != null && c.lon != null && !(c.lat === 0 && c.lon === 0));
if (!withLoc.length) continue;
// Find prev resolved neighbor
let prevPubkey = null, prevPos = null;
for (let j = i - 1; j >= 0; j--) {
if (hopPositions[hops[j]]) {
prevPos = hopPositions[hops[j]];
prevPubkey = resolved[hops[j]] ? resolved[hops[j]].pubkey : null;
break;
}
}
if (!prevPos && originPos) prevPos = originPos;
// Find next resolved neighbor
let nextPubkey = null, nextPos = null;
for (let j = i + 1; j < hops.length; j++) {
if (hopPositions[hops[j]]) {
nextPos = hopPositions[hops[j]];
nextPubkey = resolved[hops[j]] ? resolved[hops[j]].pubkey : null;
break;
}
}
if (!nextPos && observerPos) nextPos = observerPos;
// Skip if we have zero context (wait for a later iteration or neighbor resolution)
if (!prevPubkey && !nextPubkey && !prevPos && !nextPos) continue;
const picked = pickByAffinity(withLoc, prevPubkey, nextPubkey, prevPos, nextPos);
r.name = picked.name;
r.pubkey = picked.pubkey;
hopPositions[hop] = { lat: picked.lat, lon: picked.lon };
changed = true;
// Forward pass
let lastPos = (originLat != null && originLon != null) ? { lat: originLat, lon: originLon } : null;
let lastResolvedPubkey = null;
for (let i = 0; i < hops.length; i++) {
const hop = hops[i];
if (hopPositions[hop]) {
lastPos = hopPositions[hop];
lastResolvedPubkey = resolved[hop] ? resolved[hop].pubkey : null;
continue;
}
const r = resolved[hop];
if (!r || !r.ambiguous) continue;
const withLoc = r.candidates.filter(c => c.lat && c.lon && !(c.lat === 0 && c.lon === 0));
if (!withLoc.length) continue;
// Affinity-aware: prefer candidates that are neighbors of the previous hop
const picked = pickByAffinity(withLoc, lastResolvedPubkey, lastPos, i === hops.length - 1 ? observerLat : null, i === hops.length - 1 ? observerLon : null);
r.name = picked.name;
r.pubkey = picked.pubkey;
hopPositions[hop] = { lat: picked.lat, lon: picked.lon };
lastPos = hopPositions[hop];
lastResolvedPubkey = picked.pubkey;
}
// Backward pass
let nextPos = (observerLat != null && observerLon != null) ? { lat: observerLat, lon: observerLon } : null;
let nextResolvedPubkey = null;
for (let i = hops.length - 1; i >= 0; i--) {
const hop = hops[i];
if (hopPositions[hop]) {
nextPos = hopPositions[hop];
nextResolvedPubkey = resolved[hop] ? resolved[hop].pubkey : null;
continue;
}
const r = resolved[hop];
if (!r || !r.ambiguous) continue;
const withLoc = r.candidates.filter(c => c.lat && c.lon && !(c.lat === 0 && c.lon === 0));
if (!withLoc.length || !nextPos) continue;
// Affinity-aware: prefer candidates that are neighbors of the next hop
const picked = pickByAffinity(withLoc, nextResolvedPubkey, nextPos, null, null);
r.name = picked.name;
r.pubkey = picked.pubkey;
hopPositions[hop] = { lat: picked.lat, lon: picked.lon };
nextPos = hopPositions[hop];
nextResolvedPubkey = picked.pubkey;
}
// Sanity check: drop hops impossibly far from neighbors
@@ -346,13 +276,13 @@ window.HopResolver = (function() {
*/
function resolveFromServer(hops, resolvedPath) {
if (!hops || !resolvedPath || hops.length !== resolvedPath.length) return {};
const result = {};
for (let i = 0; i < hops.length; i++) {
const hop = hops[i];
const pubkey = resolvedPath[i];
var result = {};
for (var i = 0; i < hops.length; i++) {
var hop = hops[i];
var pubkey = resolvedPath[i];
if (!pubkey) continue; // null = unresolved, leave for client-side fallback
// O(1) lookup via pubkeyIdx built during init()
const node = pubkeyIdx[pubkey.toLowerCase()] || null;
var node = pubkeyIdx[pubkey.toLowerCase()] || null;
result[hop] = {
name: node ? node.name : pubkey.slice(0, 8),
pubkey: pubkey,
+3 -18
View File
@@ -132,7 +132,7 @@
/* ---- Node Detail Panel ---- */
.live-node-detail {
top: 64px;
top: 60px;
right: 12px;
width: 320px;
max-height: calc(100vh - 140px);
@@ -325,14 +325,11 @@
}
.live-stats-row { flex-wrap: wrap; gap: 4px; }
.live-stat-pill { font-size: 11px; padding: 2px 7px; }
.live-toggles { font-size: 10px; gap: 6px; margin-left: 0; overflow-x: auto; flex-wrap: nowrap; -webkit-overflow-scrolling: touch; width: 100%; min-width: 0; }
.live-toggles { font-size: 10px; gap: 6px; margin-left: 0; }
.live-title { font-size: 12px; letter-spacing: 1px; }
/* #203 — bottom-sheet node detail on mobile */
.live-node-detail { width: 100%; right: 0; left: 0; top: auto; bottom: 0; max-height: 60dvh; border-radius: 16px 16px 0 0; overflow-y: auto; z-index: 1050; }
.live-node-detail { width: 100%; right: 0; left: 0; top: auto; bottom: 0; max-height: 60vh; border-radius: 16px 16px 0 0; overflow-y: auto; }
.live-node-detail.hidden { transform: translateY(100%); }
/* Close button was unreachable: panel-header collapsed to 8px on mobile, panel-content
scroll area started at y=8, overlapping the button's 36px tap target (y=642) */
.live-node-detail .panel-header { min-height: 44px; }
.feed-detail-card {
position: fixed !important;
right: 0 !important;
@@ -692,18 +689,6 @@
.live-feed { bottom: 68px; }
.feed-show-btn { bottom: 68px !important; }
/* Backdrop for mobile tap-outside-to-close (#797) */
.node-detail-backdrop {
display: none;
position: absolute;
inset: 0;
z-index: 1049;
background: rgba(0, 0, 0, 0.25);
}
@media (max-width: 640px) {
.node-detail-backdrop.active { display: block; }
}
/* Mobile VCR */
@media (max-width: 640px) {
/* Mobile VCR: two-row stacked layout */
+2 -8
View File
@@ -849,7 +849,6 @@
<div class="panel-content" aria-live="polite" aria-relevant="additions" role="log"></div>
</div>
<button class="feed-show-btn hidden" id="feedShowBtn" title="Show feed">📋</button>
<div id="nodeDetailBackdrop" class="node-detail-backdrop"></div>
<div class="live-overlay live-node-detail hidden" id="liveNodeDetail">
<div class="panel-header">
<button class="panel-corner-btn" data-panel="liveNodeDetail" title="Move panel to next corner" aria-label="Move panel to next corner"></button>
@@ -1217,14 +1216,10 @@
// Node detail panel
const nodeDetailPanel = document.getElementById('liveNodeDetail');
const nodeDetailContent = document.getElementById('nodeDetailContent');
const nodeDetailBackdrop = document.getElementById('nodeDetailBackdrop');
function closeNodeDetail() {
document.getElementById('nodeDetailClose').addEventListener('click', () => {
activeNodeDetailKey = null;
nodeDetailPanel.classList.add('hidden');
nodeDetailBackdrop.classList.remove('active');
}
document.getElementById('nodeDetailClose').addEventListener('click', closeNodeDetail);
nodeDetailBackdrop.addEventListener('click', closeNodeDetail);
});
// Feed panel resize handle (#27)
const savedFeedWidth = localStorage.getItem('live-feed-width');
@@ -1456,7 +1451,6 @@
const panel = document.getElementById('liveNodeDetail');
const content = document.getElementById('nodeDetailContent');
panel.classList.remove('hidden');
document.getElementById('nodeDetailBackdrop').classList.add('active');
content.innerHTML = '<div style="padding:20px;color:var(--text-muted)">Loading…</div>';
try {
const [data, healthData] = await Promise.all([
+1 -1
View File
@@ -965,7 +965,7 @@
</dl>
<div style="margin-top:8px;clear:both;">
<a href="#/nodes/${node.public_key}" style="color:var(--accent);font-size:12px;">View Node </a>
${node.public_key ? ` · <a href="javascript:void(0)" role="button" data-show-neighbors data-pubkey="${escapeHtml(node.public_key)}" data-name="${escapeHtml(node.name || 'Unknown')}" style="color:var(--accent);font-size:12px;cursor:pointer;">Show Neighbors</a>` : ''}
${node.public_key ? ` · <a href="#" data-show-neighbors data-pubkey="${escapeHtml(node.public_key)}" data-name="${escapeHtml(node.name || 'Unknown')}" style="color:var(--accent);font-size:12px;">Show Neighbors</a>` : ''}
</div>
</div>`;
}
+56 -118
View File
@@ -286,29 +286,11 @@
if (h) h.textContent = 'Neighbors (' + data.neighbors.length + ')';
}
var html = renderNeighborTable(data.neighbors, limit);
if (limit && data.neighbors.length > limit) {
html += '<div style="margin-top:6px;text-align:right"><button class="btn-link show-all-neighbors-btn" style="font-size:12px;cursor:pointer;background:none;border:none;color:var(--accent);padding:0">Show all ' + data.neighbors.length + ' neighbors </button></div>';
} else if (!limit && data.neighbors.length > 5) {
// Collapse toggle when expanded (#855)
html += '<div style="margin-top:6px;text-align:right"><button class="btn-link collapse-neighbors-btn" style="font-size:12px;cursor:pointer;background:none;border:none;color:var(--accent);padding:0">Show fewer ▲</button></div>';
if (limit && data.neighbors.length > limit && viewAllPubkey) {
html += '<div style="margin-top:6px;text-align:right"><a href="#/nodes/' + encodeURIComponent(viewAllPubkey) + '?section=node-neighbors" style="font-size:12px">View all ' + data.neighbors.length + ' neighbors </a></div>';
}
el.innerHTML = html;
// Wire "Show all neighbors" expand button (#855)
var expandBtn = el.querySelector('.show-all-neighbors-btn');
if (expandBtn) {
expandBtn.addEventListener('click', function() {
renderNeighborData(data, containerId, 0, headerSelector, null);
});
}
// Wire collapse button (#855)
var collapseBtn = el.querySelector('.collapse-neighbors-btn');
if (collapseBtn) {
collapseBtn.addEventListener('click', function() {
renderNeighborData(data, containerId, 5, headerSelector, null);
});
}
// Initialize TableSort on neighbor table
var neighborTable = el.querySelector('.neighbor-sort-table');
if (neighborTable && window.TableSort) {
@@ -336,11 +318,8 @@
function init(app, routeParam) {
directNode = routeParam || null;
if (directNode) {
// Full-screen single node view (desktop + mobile).
// Reached via the 🔍 Details link or a deep link to #/nodes/{pubkey}.
// Row clicks use history.replaceState (no hashchange → no re-init),
// so the split-panel UX on desktop is preserved.
if (directNode && window.innerWidth <= 640) {
// Full-screen single node view (mobile only)
app.innerHTML = `<div class="node-fullscreen">
<div class="node-full-header">
<button class="detail-back-btn node-back-btn" id="nodeBackBtn" aria-label="Back to nodes"></button>
@@ -373,7 +352,7 @@
app.innerHTML = `<div class="nodes-page">
<div class="nodes-topbar">
<input type="text" class="nodes-search" id="nodeSearch" placeholder="Search by name or pubkey prefix…" aria-label="Search nodes by name or pubkey prefix">
<input type="text" class="nodes-search" id="nodeSearch" placeholder="Search nodes by name…" aria-label="Search nodes by name">
<div class="nodes-counts" id="nodeCounts"></div>
</div>
<div id="nodesRegionFilter" class="region-filter-container"></div>
@@ -559,10 +538,9 @@
</div>
<div class="node-full-card" id="node-packets">
${(() => { const validPackets = adverts.filter(p => p.hash && p.timestamp); return `
<h4>Recent Packets (${validPackets.length})</h4>
<h4>Recent Packets (${adverts.length})</h4>
<div class="node-activity-list">
${validPackets.length ? validPackets.map(p => {
${adverts.length ? adverts.map(p => {
let decoded; try { decoded = JSON.parse(p.decoded_json); } catch {}
const typeLabel = p.payload_type === 4 ? '📡 Advert' : p.payload_type === 5 ? '💬 Channel' : p.payload_type === 2 ? '✉️ DM' : '📦 Packet';
const detail = decoded?.text ? ': ' + escapeHtml(truncate(decoded.text, 50)) : decoded?.name ? ' — ' + escapeHtml(decoded.name) : '';
@@ -588,7 +566,6 @@
</div>`;
}).join('') : '<div class="text-muted">No recent packets</div>'}
</div>
`; })()}
</div>`;
// Map
@@ -651,9 +628,34 @@
headerSelector: '#fullNeighborsHeader'
});
// #690 — Clock Skew detail section (full-screen view)
loadClockSkewInto(document.getElementById('node-clock-skew'), n.public_key);
// #690 — Clock Skew detail section
(async function loadClockSkew() {
var container = document.getElementById('node-clock-skew');
if (!container) return;
try {
var cs = await api('/nodes/' + encodeURIComponent(n.public_key) + '/clock-skew', { ttl: 30000 });
if (!cs || !cs.severity) return;
container.style.display = '';
var severityColor = SKEW_SEVERITY_COLORS[cs.severity] || 'var(--text-muted)';
var severityLabel = SKEW_SEVERITY_LABELS[cs.severity] || cs.severity;
var driftHtml = cs.driftPerDaySec ? '<div style="font-size:12px;color:var(--text-muted);margin-top:2px">Drift: ' + formatDrift(cs.driftPerDaySec) + '</div>' : '';
var sparkHtml = renderSkewSparkline(cs.samples, 200, 32);
var skewDisplay = cs.severity === 'no_clock'
? '<span style="font-size:18px;font-weight:700;color:var(--text-muted)">No Clock</span>'
: '<span style="font-size:18px;font-weight:700;font-family:var(--mono)">' + formatSkew(cs.medianSkewSec) + '</span>';
container.innerHTML =
'<h4 style="margin:0 0 6px">⏰ Clock Skew</h4>' +
'<div style="display:flex;align-items:center;gap:12px;flex-wrap:wrap">' +
skewDisplay +
renderSkewBadge(cs.severity, cs.medianSkewSec) +
(cs.calibrated ? ' <span style="font-size:10px;color:var(--text-muted)" title="Observer-calibrated">✓ calibrated</span>' : '') +
'</div>' +
driftHtml +
(sparkHtml ? '<div class="skew-sparkline-wrap" style="margin-top:8px">' + sparkHtml + '<div style="font-size:10px;color:var(--text-muted)">Skew over time (' + (cs.samples || []).length + ' samples)</div></div>' : '');
} catch (e) {
// Non-fatal — section stays hidden
}
})();
// Affinity debug panel — show if debugAffinity is enabled
(function loadAffinityDebug() {
@@ -808,44 +810,7 @@
let _themeRefreshHandler = null;
let _allNodes = null; // cached full node list
let _fleetSkew = null; // cached clock skew map: pubkey → {severity, recentMedianSkewSec, medianSkewSec, ...}
/**
* Fetch per-node clock skew and render into the given container element.
* Shared between the full-screen detail page and the side panel (#813, #690).
* No-op if the container is missing, the API errors, or the response lacks severity.
*/
async function loadClockSkewInto(container, pubkey) {
if (!container) return;
try {
var cs = await api('/nodes/' + encodeURIComponent(pubkey) + '/clock-skew', { ttl: 30000 });
if (!cs || !cs.severity) return;
container.style.display = '';
var driftHtml = cs.driftPerDaySec ? '<div style="font-size:12px;color:var(--text-muted);margin-top:2px">Drift: ' + formatDrift(cs.driftPerDaySec) + '</div>' : '';
var sparkHtml = renderSkewSparkline(cs.samples, 200, 32);
var skewVal = window.currentSkewValue(cs);
var skewDisplay = cs.severity === 'no_clock'
? '<span style="font-size:18px;font-weight:700;color:var(--text-muted)">No Clock</span>'
: '<span style="font-size:18px;font-weight:700;font-family:var(--mono)">' + formatSkew(skewVal) + '</span>';
var bimodalWarning = '';
if (cs.severity === 'bimodal_clock') {
var totalRecent = cs.recentSampleCount || 0;
bimodalWarning = '<div style="font-size:12px;color:var(--status-amber-text);margin-top:4px">⚠️ ' + (cs.recentBadSampleCount || '?') + ' of last ' + (totalRecent || '?') + ' adverts had nonsense timestamps (likely RTC reset)</div>';
}
container.innerHTML =
'<h4 style="margin:0 0 6px">⏰ Clock Skew</h4>' +
'<div style="display:flex;align-items:center;gap:12px;flex-wrap:wrap">' +
skewDisplay +
renderSkewBadge(cs.severity, skewVal, cs) +
(cs.calibrated ? ' <span style="font-size:10px;color:var(--text-muted)" title="Observer-calibrated">✓ calibrated</span>' : '') +
'</div>' +
driftHtml +
(sparkHtml ? '<div class="skew-sparkline-wrap" style="margin-top:8px">' + sparkHtml + '<div style="font-size:10px;color:var(--text-muted)">Skew over time (' + (cs.samples || []).length + ' samples)</div></div>' : '') +
bimodalWarning;
} catch (e) {
// Non-fatal — section stays hidden
}
}
let _fleetSkew = null; // cached clock skew map: pubkey → {severity, medianSkewSec, ...}
/** Fetch fleet clock skew once, return map keyed by pubkey */
async function getFleetSkew() {
@@ -902,7 +867,8 @@
let filtered = _allNodes;
if (activeTab !== 'all') filtered = filtered.filter(n => (n.role || '').toLowerCase() === activeTab);
if (search) {
filtered = filtered.filter(n => window._nodesMatchesSearch(n, search));
const q = search.toLowerCase();
filtered = filtered.filter(n => (n.name || '').toLowerCase().includes(q) || (n.public_key || '').toLowerCase().includes(q));
}
if (lastHeard) {
const ms = { '1h': 3600000, '2h': 7200000, '6h': 21600000, '12h': 43200000, '24h': 86400000, '48h': 172800000, '3d': 259200000, '7d': 604800000, '14d': 1209600000, '30d': 2592000000 }[lastHeard];
@@ -1073,13 +1039,24 @@
// #630: Close button for node detail panel (important for mobile full-screen overlay)
document.getElementById('nodesRight').addEventListener('click', function(e) {
// #778/#856: Analytics link — force hashchange via replaceState + assign.
// (Details button is handled separately via .node-detail-btn click listener)
// #778: Details/Analytics links don't navigate because replaceState
// already set the hash to #/nodes/PUBKEY, so clicking <a href="#/nodes/PUBKEY">
// is a same-hash no-op. For the detail link (same page), call init()
// directly — faster than a full router teardown/rebuild cycle.
// For analytics (different page), force hashchange via replaceState + assign.
var link = e.target.closest('a.btn-primary[href^="#/nodes/"]');
if (link) {
e.preventDefault();
var href = link.getAttribute('href');
if (href.indexOf('/analytics') !== -1) {
if (href.indexOf('/analytics') === -1) {
// Detail link — re-init with the pubkey directly;
// destroy() first to clean up WS handlers, maps, listeners
destroy();
var pubkey = href.replace('#/nodes/', '').split('/')[0];
var appEl = document.getElementById('app');
init(appEl, decodeURIComponent(pubkey));
history.replaceState(null, '', href);
} else {
// Analytics link — different page, force hashchange via replaceState + assign
history.replaceState(null, '', '#/');
location.hash = href.substring(1);
@@ -1131,7 +1108,7 @@
const status = getNodeStatus(n.role || 'companion', lastSeenTime ? new Date(lastSeenTime).getTime() : 0);
const lastSeenClass = status === 'active' ? 'last-seen-active' : 'last-seen-stale';
const cs = _fleetSkew && _fleetSkew[n.public_key];
const skewBadgeHtml = cs && cs.severity && cs.severity !== 'ok' ? renderSkewBadge(cs.severity, window.currentSkewValue(cs), cs) : '';
const skewBadgeHtml = cs && cs.severity && cs.severity !== 'ok' ? renderSkewBadge(cs.severity, cs.medianSkewSec) : '';
return `<tr data-key="${n.public_key}" data-action="select" data-value="${n.public_key}" tabindex="0" role="row" class="${selectedKey === n.public_key ? 'selected' : ''}${isClaimed ? ' claimed-row' : ''}">
<td>${favStar(n.public_key, 'node-fav')}${isClaimed ? '<span class="claimed-badge" title="My Mesh">★</span> ' : ''}<strong>${n.name || '(unnamed)'}</strong>${dupNameBadge(n.name, n.public_key, dupMap)}${skewBadgeHtml}</td>
<td class="mono col-pubkey">${truncate(n.public_key, 16)}</td>
@@ -1144,19 +1121,6 @@
makeColumnsResizable('#nodesTable', 'meshcore-nodes-col-widths');
}
/**
* Navigate to the full-screen node view for `pubkey` from anywhere within
* the nodes module. Single source of navigation truth works regardless
* of current hash state (hash assignment alone is a no-op when the hash
* is already the target).
*/
function navigateToNode(pubkey) {
destroy();
var appEl = document.getElementById('app');
history.replaceState(null, '', '#/nodes/' + encodeURIComponent(pubkey));
init(appEl, pubkey);
}
async function selectNode(pubkey) {
// On mobile, navigate to full-screen node view
if (window.innerWidth <= 640) {
@@ -1203,7 +1167,7 @@
<div class="node-detail">
<div class="node-detail-name">${escapeHtml(n.name || '(unnamed)')}${dupBadge}</div>
<div class="node-detail-role">${renderNodeBadges(n, roleColor)}
<button class="btn-primary node-detail-btn" data-pubkey="${encodeURIComponent(n.public_key)}" aria-label="View details for ${escapeHtml(n.name || n.public_key)}" style="font-size:11px;padding:2px 8px;margin-left:8px;cursor:pointer">🔍 Details</button>
<a href="#/nodes/${encodeURIComponent(n.public_key)}" class="btn-primary" style="display:inline-block;text-decoration:none;font-size:11px;padding:2px 8px;margin-left:8px">🔍 Details</a>
<a href="#/nodes/${encodeURIComponent(n.public_key)}/analytics" class="btn-primary" style="display:inline-block;margin-left:4px;text-decoration:none;font-size:11px;padding:2px 8px">📊 Analytics</a>
</div>
${renderStatusExplanation(n)}
@@ -1230,8 +1194,6 @@
</dl>
</div>
<div class="node-detail-section skew-detail-section" id="node-clock-skew" style="display:none"></div>
${observers.length ? `<div class="node-detail-section">
${(() => { const regions = [...new Set(observers.map(o => o.iata).filter(Boolean))]; return regions.length ? `<div style="margin-bottom:6px;font-size:12px"><strong>Regions:</strong> ${regions.join(', ')}</div>` : ''; })()}
<h4>Heard By (${observers.length} observer${observers.length > 1 ? 's' : ''})</h4>
@@ -1254,10 +1216,9 @@
</div>
<div class="node-detail-section">
${(() => { const validPackets = adverts.filter(a => a.hash && a.timestamp); return `
<h4>Recent Packets (${validPackets.length})</h4>
<h4>Recent Packets (${adverts.length})</h4>
<div id="advertTimeline">
${validPackets.length ? validPackets.map(a => {
${adverts.length ? adverts.map(a => {
let decoded;
try { decoded = JSON.parse(a.decoded_json); } catch {}
const pType = PAYLOAD_TYPES[a.payload_type] || 'Packet';
@@ -1276,7 +1237,6 @@
</div>`;
}).join('') : '<div class="text-muted" style="padding:8px">No recent packets</div>'}
</div>
`; })()}
</div>
</div>`;
@@ -1320,14 +1280,6 @@
} catch {}
}
// Wire "Details" button via the unified navigateToNode helper
var detailBtn = panel.querySelector('.node-detail-btn');
if (detailBtn) {
detailBtn.addEventListener('click', function() {
navigateToNode(decodeURIComponent(detailBtn.getAttribute('data-pubkey')));
});
}
// Fetch neighbors for this node (condensed panel — top 5)
fetchAndRenderNeighbors(n.public_key, 'panelNeighborsContent', {
limit: 5,
@@ -1335,10 +1287,6 @@
viewAllPubkey: n.public_key
});
// #813 — Clock Skew section in side panel (mirrors full-screen view)
loadClockSkewInto(document.getElementById('node-clock-skew'), n.public_key);
// Fetch paths through this node
api('/nodes/' + encodeURIComponent(n.public_key) + '/paths', { ttl: CLIENT_TTL.nodeDetail }).then(pathData => {
const el = document.getElementById('pathsContent');
@@ -1437,14 +1385,4 @@
window._nodesRenderNodeTimestampText = renderNodeTimestampText;
window._nodesGetStatusInfo = getStatusInfo;
window._nodesGetStatusTooltip = getStatusTooltip;
// #862: Expose search filter logic for testing
window._nodesMatchesSearch = function(node, query) {
if (!query) return true;
var q = query.toLowerCase();
var isHex = /^[0-9a-f]+$/i.test(q);
if ((node.name || '').toLowerCase().includes(q)) return true;
if (isHex && (node.public_key || '').toLowerCase().startsWith(q)) return true;
return false;
};
})();
+34 -209
View File
@@ -48,7 +48,6 @@
if (filters.hash) parts.push('hash=' + encodeURIComponent(filters.hash));
if (filters.node) parts.push('node=' + encodeURIComponent(filters.node));
if (filters.observer) parts.push('observer=' + encodeURIComponent(filters.observer));
if (filters.channel) parts.push('channel=' + encodeURIComponent(filters.channel));
if (filters._filterExpr) parts.push('filter=' + encodeURIComponent(filters._filterExpr));
return parts.length ? '?' + parts.join('&') : '';
}
@@ -353,8 +352,6 @@
if (_urlNode) { filters.node = _urlNode; filters.nodeName = _urlNode.slice(0, 8); }
var _urlObserver = _initUrlParams.get('observer');
if (_urlObserver) filters.observer = _urlObserver;
var _urlChannel = _initUrlParams.get('channel');
if (_urlChannel) filters.channel = _urlChannel;
var _urlFilterExpr = _initUrlParams.get('filter');
if (_urlFilterExpr) filters._filterExpr = _urlFilterExpr;
@@ -387,9 +384,9 @@
const obs = data.observations.find(o => String(o.id) === String(obsTarget));
if (obs) {
expandedHashes.add(h);
const obsPacket = {...data.packet, observer_id: obs.observer_id, observer_name: obs.observer_name, snr: obs.snr, rssi: obs.rssi, path_json: obs.path_json, resolved_path: obs.resolved_path, direction: obs.direction, timestamp: obs.timestamp, first_seen: obs.timestamp};
const obsPacket = {...data.packet, observer_id: obs.observer_id, observer_name: obs.observer_name, snr: obs.snr, rssi: obs.rssi, path_json: obs.path_json, resolved_path: obs.resolved_path, timestamp: obs.timestamp, first_seen: obs.timestamp};
clearParsedCache(obsPacket);
selectPacket(obs.id, h, {packet: obsPacket, observations: data.observations}, obs.id);
selectPacket(obs.id, h, {packet: obsPacket, breakdown: data.breakdown, observations: data.observations}, obs.id);
} else {
selectPacket(data.packet.id, h, data);
}
@@ -519,7 +516,7 @@
if (p.decoded_json) existing.decoded_json = p.decoded_json;
// Update expanded children if this group is expanded
if (expandedHashes.has(h) && existing._children) {
existing._children.unshift(clearParsedCache({...p, _isObservation: true}));
existing._children.unshift(p);
if (existing._children.length > 200) existing._children.length = 200;
sortGroupChildren(existing);
// Invalidate row counts — child count changed, so virtual scroll
@@ -625,7 +622,6 @@
if (filters.hash) params.set('hash', filters.hash);
if (filters.node) params.set('node', filters.node);
if (filters.observer) params.set('observer', filters.observer);
if (filters.channel) params.set('channel', filters.channel);
if (groupByHash) {
params.set('groupByHash', 'true');
} else {
@@ -683,14 +679,10 @@
// Restore expanded group children (parallel fetch, Map lookup)
if (groupByHash && expandedHashes.size > 0) {
const expandedArr = [...expandedHashes];
// Fetch the full packet detail (which includes per-observation rows) for each expanded hash.
// Previously this used `/packets?hash=X&limit=20` which returned ONE aggregate row, causing
// every "child" row in the table to carry the parent packet.id instead of unique observation
// ids — so clicking any child pointed the side pane at the same aggregate. See #866.
const results = await Promise.all(expandedArr.map(hash => {
const group = hashIndex.get(hash);
if (!group) return { hash, group: null, data: null };
return api(`/packets/${hash}`)
return api(`/packets?hash=${hash}&limit=20`)
.then(data => ({ hash, group, data }))
.catch(() => ({ hash, group, data: null }));
}));
@@ -698,15 +690,7 @@
if (!group) {
expandedHashes.delete(hash);
} else if (data) {
const pkt = data.packet || group;
// Build per-observation children. Spread (pkt, obs) so obs-level fields
// (id, observer_id/name, path_json, snr/rssi, timestamp, raw_hex) override
// the aggregate. Each child's `id` is the observation id (unique per observer).
const obs = data.observations || [];
group._children = obs.length
? obs.map(o => clearParsedCache({...pkt, ...o, _isObservation: true}))
: [pkt];
group._fetchedData = { packet: pkt, observations: obs };
group._children = data.packets || [];
sortGroupChildren(group);
}
}
@@ -766,11 +750,6 @@
<button class="multi-select-trigger" id="typeTrigger" title="Filter by packet type">All Types </button>
<div class="multi-select-menu" id="typeMenu"></div>
</div>
<div class="filter-group" style="display:inline-flex;align-items:center;gap:4px">
<select id="fChannel" class="filter-select" aria-label="Filter by channel" title="Filter Channel Messages (GRP_TXT) by channel">
<option value="">All Channels</option>
</select>
</div>
</div>
<div class="filter-group">
<button class="btn ${groupByHash ? 'active' : ''}" id="fGroup" title="Collapse duplicate observations of the same packet into expandable groups">Group by Hash</button>
@@ -959,63 +938,6 @@
renderTableRows();
});
// --- Channel filter (#812) ---
// Server-side filter: /api/packets?channel=<hash>. Triggers loadPackets()
// (not just renderTableRows) so the filter applies before pagination.
const channelSel = document.getElementById('fChannel');
if (channelSel) {
if (filters.channel) {
// Pre-seed an option so the current filter shows as selected even
// before the channels list arrives. Replaced when populateChannels resolves.
const opt = document.createElement('option');
opt.value = filters.channel;
opt.textContent = filters.channel;
opt.selected = true;
channelSel.appendChild(opt);
}
api('/channels').then(data => {
const channels = (data && data.channels) || [];
// Build options via DOM API: channel names are network-supplied
// and must NOT be interpolated into innerHTML (XSS, #812).
// Sort alphabetically (case-insensitive) for predictable picker order;
// the API returns last-activity order which is unstable for a dropdown.
const sorted = channels.slice().sort((a, b) => {
const an = (a.name || a.hash || '').toLowerCase();
const bn = (b.name || b.hash || '').toLowerCase();
return an < bn ? -1 : an > bn ? 1 : 0;
});
channelSel.textContent = '';
const allOpt = document.createElement('option');
allOpt.value = '';
allOpt.textContent = 'All Channels';
channelSel.appendChild(allOpt);
let matched = false;
for (const ch of sorted) {
const v = ch.hash || ch.name || '';
if (!v) continue;
const opt = document.createElement('option');
opt.value = v;
opt.textContent = ch.name || v;
if (v === filters.channel) { opt.selected = true; matched = true; }
channelSel.appendChild(opt);
}
// If current filter isn't in the list (encrypted hash, stale, or
// race with cache), keep it as a selected option so the UI reflects state.
if (filters.channel && !matched) {
const opt = document.createElement('option');
opt.value = filters.channel;
opt.textContent = filters.channel;
opt.selected = true;
channelSel.appendChild(opt);
}
}).catch(() => {});
channelSel.addEventListener('change', (e) => {
filters.channel = e.target.value || undefined;
updatePacketsUrl();
loadPackets();
});
}
// Close multi-select menus on outside click
bindDocumentHandler('menu', 'click', (e) => {
const obsWrap = document.getElementById('observerFilterWrap');
@@ -1177,7 +1099,7 @@
const nodes = data.nodes || [];
if (nodes.length === 0) { fNodeDrop.classList.add('hidden'); fNode.setAttribute('aria-expanded', 'false'); return; }
fNodeDrop.innerHTML = nodes.map((n, i) =>
`<div class="node-filter-option" id="fNodeOpt-${i}" role="option" data-key="${n.public_key}" data-name="${escapeHtml(n.name || n.public_key.slice(0,8))}">${escapeHtml(n.name || n.public_key.slice(0,8))} <span style="color:var(--text-muted);font-size:0.8em">${n.public_key.slice(0,8)}</span></div>`
`<div class="node-filter-option" id="fNodeOpt-${i}" role="option" data-key="${n.public_key}" data-name="${escapeHtml(n.name || n.public_key.slice(0,8))}">${escapeHtml(n.name || n.public_key.slice(0,8))} <span style="color:var(--muted);font-size:0.8em">${n.public_key.slice(0,8)}</span></div>`
).join('');
fNodeDrop.classList.remove('hidden');
fNode.setAttribute('aria-expanded', 'true');
@@ -1258,9 +1180,9 @@
const child = group?._children?.find(c => String(c.id) === String(value));
if (child) {
const parentData = group._fetchedData;
const obsPacket = parentData ? {...parentData.packet, observer_id: child.observer_id, observer_name: child.observer_name, snr: child.snr, rssi: child.rssi, path_json: child.path_json, resolved_path: child.resolved_path, direction: child.direction, timestamp: child.timestamp, first_seen: child.timestamp} : child;
const obsPacket = parentData ? {...parentData.packet, observer_id: child.observer_id, observer_name: child.observer_name, snr: child.snr, rssi: child.rssi, path_json: child.path_json, resolved_path: child.resolved_path, timestamp: child.timestamp, first_seen: child.timestamp} : child;
if (parentData) { clearParsedCache(obsPacket); }
selectPacket(child.id, parentHash, {packet: obsPacket, observations: parentData?.observations}, child.id);
selectPacket(child.id, parentHash, {packet: obsPacket, breakdown: parentData?.breakdown, observations: parentData?.observations}, child.id);
}
}
else if (action === 'select-hash') pktSelectHash(value);
@@ -1809,56 +1731,19 @@
panel.innerHTML = isMobileNow ? '' : '<div class="panel-resize-handle" id="pktResizeHandle"></div>' + PANEL_CLOSE_HTML;
const content = document.createElement('div');
panel.appendChild(content);
await renderDetail(content, data, selectedObservationId);
await renderDetail(content, data);
if (!isMobileNow) initPanelResize();
} catch (e) {
panel.innerHTML = `<div class="text-muted">Error: ${e.message}</div>`;
}
}
async function renderDetail(panel, data, chosenObsId) {
async function renderDetail(panel, data) {
const pkt = data.packet;
const observations = data.observations || [];
// Per-observation rendering (issue #849):
// When opened from a packet row (no specific observer), default to first observation.
// When opened from an observation child row, use that observation.
// Clicking a different observation row in the detail re-renders with that observation.
let currentObs = null;
const targetObsId = chosenObsId || selectedObservationId;
if (targetObsId && observations.length) {
currentObs = observations.find(o => String(o.id) === String(targetObsId));
}
if (!currentObs && observations.length) {
currentObs = observations[0]; // fall back to first observation
}
// If we have a current observation, build pkt fields from it so summary is per-observation
const effectivePkt = currentObs ? clearParsedCache({...pkt, ...currentObs, _isObservation: true}) : pkt;
const decoded = getParsedDecoded(effectivePkt) || {};
const pathHops = getParsedPath(effectivePkt) || [];
// Compute breakdown ranges from the actually-rendered raw_hex (per-observation).
// Single source of truth — derived from the same bytes we display, so a
// post-#882 per-obs raw_hex with a different path length than the top-level
// packet's raw_hex still gets accurate byte highlights.
const obsRawHexForRanges = effectivePkt.raw_hex || pkt.raw_hex || '';
const ranges = obsRawHexForRanges
? computeBreakdownRanges(obsRawHexForRanges, pkt.route_type, pkt.payload_type)
: [];
// Cross-check: hop count from raw_hex path_len byte vs path_json length
const obsRawHex = effectivePkt.raw_hex || pkt.raw_hex || '';
let rawHopCount = null;
if (obsRawHex.length >= 4) {
// path_len byte position depends on route type
const plOff = getPathLenOffset(pkt.route_type);
const plByte = parseInt(obsRawHex.slice(plOff * 2, plOff * 2 + 2), 16);
if (!isNaN(plByte)) rawHopCount = plByte & 0x3F;
}
if (rawHopCount != null && pathHops.length !== rawHopCount) {
console.warn(`[CoreScope] Hop count inconsistency for packet ${pkt.hash}: path_json has ${pathHops.length} hops but raw_hex path_len has ${rawHopCount}. UI shows path_json.`);
}
const breakdown = data.breakdown || {};
const ranges = breakdown.ranges || [];
const decoded = getParsedDecoded(pkt) || {};
const pathHops = getParsedPath(pkt) || [];
// Resolve sender GPS — from packet directly, or from known node in DB
let senderLat = decoded.lat != null ? decoded.lat : (decoded.latitude || null);
@@ -1902,16 +1787,15 @@
}
// Parse hash size from path byte
const plOff = getPathLenOffset(pkt.route_type);
const rawPathByte = pkt.raw_hex ? parseInt(pkt.raw_hex.slice(plOff * 2, plOff * 2 + 2), 16) : NaN;
const rawPathByte = pkt.raw_hex ? parseInt(pkt.raw_hex.slice(2, 4), 16) : NaN;
const hashSize = (isNaN(rawPathByte) || (rawPathByte & 0x3F) === 0) ? null : ((rawPathByte >> 6) + 1);
const size = effectivePkt.raw_hex ? Math.floor(effectivePkt.raw_hex.length / 2) : (pkt.raw_hex ? Math.floor(pkt.raw_hex.length / 2) : 0);
const size = pkt.raw_hex ? Math.floor(pkt.raw_hex.length / 2) : 0;
const typeName = payloadTypeName(pkt.payload_type);
const snr = effectivePkt.snr ?? decoded.SNR ?? decoded.snr ?? null;
const rssi = effectivePkt.rssi ?? decoded.RSSI ?? decoded.rssi ?? null;
const hasRawHex = !!(effectivePkt.raw_hex || pkt.raw_hex);
const snr = pkt.snr ?? decoded.SNR ?? decoded.snr ?? null;
const rssi = pkt.rssi ?? decoded.RSSI ?? decoded.rssi ?? null;
const hasRawHex = !!pkt.raw_hex;
// Build message preview
let messageHtml = '';
@@ -1922,16 +1806,17 @@
const meta = [chLabel, hopLabel, snrLabel].filter(Boolean).join(' · ');
messageHtml = `<div class="detail-message" style="padding:12px;margin:8px 0;background:var(--card-bg);border-radius:8px;border-left:3px solid var(--accent)">
<div style="font-size:1.1em">${escapeHtml(decoded.text)}</div>
${meta ? `<div style="font-size:0.85em;color:var(--text-muted);margin-top:4px">${meta}</div>` : ''}
${meta ? `<div style="font-size:0.85em;color:var(--muted);margin-top:4px">${meta}</div>` : ''}
</div>`;
} else if (decoded.type === 'GRP_TXT' && decoded.channelHash != null) {
const hashHex = decoded.channelHashHex || decoded.channelHash.toString(16).padStart(2, '0').toUpperCase();
const statusLabel = decoded.decryptionStatus === 'no_key' ? 'no key' : 'decryption failed';
messageHtml = `<div class="detail-message" style="padding:12px;margin:8px 0;background:var(--card-bg);border-radius:8px;border-left:3px solid var(--warning, #f0ad4e)">
<div style="font-size:1.1em">🔒 Channel Hash: 0x${hashHex} <span style="color:var(--text-muted)">(${statusLabel})</span></div>
<div style="font-size:1.1em">🔒 Channel Hash: 0x${hashHex} <span style="color:var(--muted)">(${statusLabel})</span></div>
</div>`;
}
const observations = data.observations || [];
const obsCount = data.observation_count || observations.length || 1;
const uniqueObservers = new Set(observations.map(o => o.observer_id)).size;
@@ -1994,30 +1879,21 @@
? `<div class="anomaly-banner" style="background:var(--warning, #f0ad4e); color:#000; padding:8px 12px; border-radius:4px; margin-bottom:8px; font-weight:600;">⚠️ Anomaly: ${escapeHtml(decoded.anomaly)}</div>`
: '';
// Hop count display: use pathHops length (= effective observation's path_json).
// The raw_hex/path_json mismatch warning is logged above for diagnostics; the UI
// must stay self-consistent — top pill names and byte breakdown rows must agree.
const displayHopCount = pathHops.length;
const obsIndicator = currentObs && observations.length > 1
? `<span style="font-size:0.8em;color:var(--text-muted);margin-left:6px">(observation ${observations.indexOf(currentObs) + 1} of ${observations.length})</span>`
: '';
panel.innerHTML = `
${anomalyBanner}
<div class="detail-title">${hasRawHex ? `Packet Byte Breakdown (${size} bytes)` : typeName + ' Packet'}</div>
<div class="detail-hash">${pkt.hash || 'Packet #' + pkt.id}${obsIndicator}</div>
<div class="detail-hash">${pkt.hash || 'Packet #' + pkt.id}</div>
${messageHtml}
<dl class="detail-meta">
<dt>Observer</dt><dd>${obsName(effectivePkt.observer_id)}</dd>
<dt>Observer</dt><dd>${obsName(pkt.observer_id)}</dd>
<dt>Location</dt><dd>${locationHtml}</dd>
<dt>SNR / RSSI</dt><dd>${snr != null ? snr + ' dB' : '—'} / ${rssi != null ? rssi + ' dBm' : '—'}</dd>
<dt>Route Type</dt><dd>${routeTypeName(pkt.route_type)}</dd>
<dt>Payload Type</dt><dd><span class="badge badge-${payloadTypeColor(pkt.payload_type)}">${typeName}</span></dd>
${hashSize ? `<dt>Hash Size</dt><dd>${hashSize} byte${hashSize !== 1 ? 's' : ''}</dd>` : ''}
<dt>Timestamp</dt><dd>${renderTimestampCell(effectivePkt.timestamp)}</dd>
<dt>Timestamp</dt><dd>${renderTimestampCell(pkt.timestamp)}</dd>
<dt>Propagation</dt><dd>${propagationHtml}</dd>
<dt>Path</dt><dd>${displayHopCount > 0 ? `<span class="badge badge-info">${displayHopCount} hop${displayHopCount !== 1 ? 's' : ''}</span> ` + renderPath(pathHops, effectivePkt.observer_id) : ' (direct)'}</dd>
${effectivePkt.direction ? `<dt>Direction</dt><dd>${escapeHtml(effectivePkt.direction)}</dd>` : ''}
<dt>Path</dt><dd>${pathHops.length ? renderPath(pathHops, pkt.observer_id) : ''}</dd>
</dl>
<div class="detail-actions">
<button class="copy-link-btn" data-packet-hash="${pkt.hash || ''}" data-packet-id="${pkt.id}" title="Copy link to this packet">🔗 Copy Link</button>
@@ -2027,59 +1903,11 @@
</div>
${hasRawHex ? `<div class="hex-legend">${buildHexLegend(ranges)}</div>
<div class="hex-dump">${createColoredHexDump(effectivePkt.raw_hex || pkt.raw_hex, ranges)}</div>` : ''}
<div class="hex-dump">${createColoredHexDump(pkt.raw_hex, ranges)}</div>` : ''}
${hasRawHex ? buildFieldTable(effectivePkt.raw_hex ? effectivePkt : pkt, decoded, pathHops, ranges) : buildDecodedTable(decoded)}
${observations.length > 1 ? `
<div class="detail-observations" style="margin-top:16px">
<div style="font-weight:600;margin-bottom:6px">Observations (${observations.length})</div>
<table class="detail-obs-table" style="width:100%;border-collapse:collapse;font-size:0.9em">
<thead><tr style="border-bottom:1px solid var(--border)">
<th style="padding:4px 6px;text-align:left">Observer</th>
<th style="padding:4px 6px;text-align:left">Hops</th>
<th style="padding:4px 6px;text-align:left">SNR</th>
<th style="padding:4px 6px;text-align:left">RSSI</th>
<th style="padding:4px 6px;text-align:left">Time</th>
</tr></thead>
<tbody>${observations.map(o => {
const oPath = getParsedPath(o);
const isCurrent = currentObs && String(o.id) === String(currentObs.id);
return `<tr class="detail-obs-row${isCurrent ? ' observation-current' : ''}" data-obs-id="${o.id}" style="cursor:pointer;${isCurrent ? 'background:var(--accent-bg, rgba(0,122,255,0.1))' : ''}" title="Click to view this observation">
<td style="padding:4px 6px">${obsName(o.observer_id)}</td>
<td style="padding:4px 6px">${oPath.length}</td>
<td style="padding:4px 6px">${o.snr != null ? o.snr + ' dB' : '—'}</td>
<td style="padding:4px 6px">${o.rssi != null ? o.rssi + ' dBm' : '—'}</td>
<td style="padding:4px 6px">${renderTimestampCell(o.timestamp)}</td>
</tr>`;
}).join('')}</tbody>
</table>
</div>` : ''}
${observations.length > 1 ? (() => {
// Cross-observer aggregate (Option B): show longest observed path across all observers
const aggregatePath = getParsedPath(pkt) || [];
return `<div class="detail-aggregate" style="margin-top:12px;padding:10px;background:var(--card-bg);border-radius:6px;border:1px solid var(--border);font-size:0.9em">
<div style="font-weight:600;margin-bottom:4px;color:var(--text-muted)">Cross-observer aggregate</div>
<div>Longest observed path: ${aggregatePath.length ? `${aggregatePath.length} hops — ${renderPath(aggregatePath, pkt.observer_id)}` : '— (direct)'}</div>
<div style="font-size:0.8em;color:var(--text-muted);margin-top:2px">Longest path seen across all ${uniqueObservers} observer${uniqueObservers !== 1 ? 's' : ''}</div>
</div>`;
})() : ''}
${hasRawHex ? buildFieldTable(pkt, decoded, pathHops, ranges) : buildDecodedTable(decoded)}
`;
// Wire up observation row click handlers — re-render detail with clicked observation
panel.querySelectorAll('.detail-obs-row').forEach(row => {
row.addEventListener('click', () => {
const obsId = row.dataset.obsId;
selectedObservationId = obsId;
// Update URL hash to reflect selected observation (deep linking)
const pktHash = pkt.hash || pkt.id;
const obsParam = obsId ? `?obs=${obsId}` : '';
history.replaceState(null, '', `#/packets/${pktHash}${obsParam}`);
renderDetail(panel, data, obsId);
});
});
// Wire up copy link button
const copyLinkBtn = panel.querySelector('.copy-link-btn');
if (copyLinkBtn) {
@@ -2187,7 +2015,7 @@
// Transport codes come BEFORE path length for transport routes (bytes 1-4)
let off = 1;
if (isTransportRoute(pkt.route_type)) {
if (pkt.route_type === 0 || pkt.route_type === 3) {
rows += sectionRow('Transport Codes', 'section-transport');
rows += fieldRow(off, 'Next Hop', buf.slice(off * 2, (off + 2) * 2), '');
rows += fieldRow(off + 2, 'Last Hop', buf.slice((off + 2) * 2, (off + 4) * 2), '');
@@ -2202,17 +2030,14 @@
rows += fieldRow(off, 'Path Length', '0x' + (buf.slice(off * 2, off * 2 + 2) || '??'), hashCountVal === 0 ? `hash_count=0 (direct advert)` : `hash_size=${hashSizeVal} byte${hashSizeVal !== 1 ? 's' : ''}, hash_count=${hashCountVal}`);
off += 1;
// Path — render hops from path_json (what this observation reported).
// Byte offsets advance by hashSize * pathHops.length to match.
const hashSize = isNaN(pathByte0) ? 1 : ((pathByte0 >> 6) + 1);
// Path
if (pathHops.length > 0) {
rows += sectionRow('Path (' + pathHops.length + ' hops)', 'section-path');
const hashSize = isNaN(pathByte0) ? 1 : ((pathByte0 >> 6) + 1);
for (let i = 0; i < pathHops.length; i++) {
const hopOff = off + i * hashSize;
const hex = String(pathHops[i] || '').toUpperCase();
const hopHtml = HopDisplay.renderHop(hex, hopNameCache[hex]);
const hopHtml = HopDisplay.renderHop(pathHops[i], hopNameCache[pathHops[i]]);
const label = `Hop ${i}${hopHtml}`;
rows += fieldRow(hopOff, label, hex, '');
rows += fieldRow(off + i * hashSize, label, pathHops[i], '');
}
off += hashSize * pathHops.length;
}
@@ -2488,7 +2313,7 @@
renderTableRows();
return;
}
// Single fetch — gets packet + observations + path
// Single fetch — gets packet + observations + path + breakdown
try {
const data = await api(`/packets/${hash}`);
const pkt = data.packet;
+3 -18
View File
@@ -401,13 +401,12 @@
warning: 'var(--status-yellow)',
critical: 'var(--status-orange)',
absurd: 'var(--status-purple)',
bimodal_clock: 'var(--status-amber)',
no_clock: 'var(--text-muted)'
};
var SKEW_SEVERITY_LABELS = {
ok: 'OK', warning: 'Warning', critical: 'Critical', absurd: 'Absurd', bimodal_clock: 'Bimodal', no_clock: 'No Clock'
ok: 'OK', warning: 'Warning', critical: 'Critical', absurd: 'Absurd', no_clock: 'No Clock'
};
var SKEW_SEVERITY_ORDER = { no_clock: 0, bimodal_clock: 1, absurd: 2, critical: 3, warning: 4, ok: 5 };
var SKEW_SEVERITY_ORDER = { no_clock: 0, absurd: 1, critical: 2, warning: 3, ok: 4 };
window.SKEW_SEVERITY_COLORS = SKEW_SEVERITY_COLORS;
window.SKEW_SEVERITY_LABELS = SKEW_SEVERITY_LABELS;
@@ -430,27 +429,13 @@
return (secPerDay >= 0 ? '+' : '') + secPerDay.toFixed(1) + ' s/day';
};
/** Skew value that drives current-health UI. Prefers the recent-window
 * median (#789, current health) over the all-time median, which can be
 * poisoned by historical bad samples. Degrades gracefully when the
 * recent field is absent (older API responses). */
window.currentSkewValue = function(cs) {
  if (!cs) return null;
  // `??` matches the original `!= null ? :` — only null/undefined fall back.
  return cs.recentMedianSkewSec ?? cs.medianSkewSec;
};
/** Render a clock skew badge HTML */
window.renderSkewBadge = function(severity, skewSec, cs) {
window.renderSkewBadge = function(severity, skewSec) {
if (!severity) return '';
var cls = 'skew-badge skew-badge--' + severity;
if (severity === 'no_clock') {
return '<span class="' + cls + '" title="Uninitialized RTC — no valid clock">🚫 No Clock</span>';
}
if (severity === 'bimodal_clock' && cs) {
var badPct = cs.goodFraction != null ? Math.round((1 - cs.goodFraction) * 100) : '?';
var label = '⏰ ' + window.formatSkew(skewSec);
return '<span class="' + cls + '" title="Clock skew: ' + window.formatSkew(skewSec) + ' (bimodal: ' + badPct + '% of recent adverts have nonsense timestamps)">' + label + '</span>';
}
var label = severity === 'ok' ? '⏰' : '⏰ ' + window.formatSkew(skewSec);
return '<span class="' + cls + '" title="Clock skew: ' + window.formatSkew(skewSec) + ' (' + (SKEW_SEVERITY_LABELS[severity] || severity) + ')">' + label + '</span>';
};
+2 -19
View File
@@ -13,9 +13,6 @@
--status-red: #ef4444;
--status-orange: #f97316;
--status-purple: #a855f7;
--status-amber: #f59e0b;
--status-amber-light: #fef3c7;
--status-amber-text: #92400e;
--role-observer: #8b5cf6;
--accent-hover: #6db3ff;
--text: #1a1a2e;
@@ -49,9 +46,6 @@
--status-red: #ef4444;
--status-orange: #f97316;
--status-purple: #a855f7;
--status-amber: #f59e0b;
--status-amber-light: #422006;
--status-amber-text: #fcd34d;
--surface-0: #0f0f23;
--surface-1: #1a1a2e;
--surface-2: #232340;
@@ -78,9 +72,6 @@
--status-red: #ef4444;
--status-orange: #f97316;
--status-purple: #a855f7;
--status-amber: #f59e0b;
--status-amber-light: #422006;
--status-amber-text: #fcd34d;
--surface-0: #0f0f23;
--surface-1: #1a1a2e;
--surface-2: #232340;
@@ -354,9 +345,6 @@ a:focus-visible, button:focus-visible, input:focus-visible, select:focus-visible
}
.detail-meta dt { color: var(--text-muted); font-size: 11px; text-transform: uppercase; letter-spacing: .3px; }
.detail-meta dd { font-weight: 500; margin-bottom: 4px; }
.observation-current { background: var(--accent-bg, rgba(0,122,255,0.1)); font-weight: 600; }
.detail-obs-row:hover { background: var(--hover-bg, rgba(255,255,255,0.05)); }
.detail-obs-table th { font-size: 0.8em; text-transform: uppercase; color: var(--text-muted); }
/* === Hex Dump === */
.hex-dump {
@@ -709,9 +697,7 @@ button.ch-item:hover .ch-remove-btn { opacity: 0.6; }
.advert-dot {
width: 10px; height: 10px; border-radius: 50%; flex-shrink: 0; margin-top: 4px;
}
/* #829: explicit color so text stays readable when inherited color matches card-bg */
.advert-info { font-size: 12px; line-height: 1.5; color: var(--text); }
.advert-info a { color: var(--accent); }
.advert-info { font-size: 12px; line-height: 1.5; }
/* === Traces Page === */
.traces-page { padding: 16px; max-width: var(--trace-max-width, 95vw); margin: 0 auto; }
@@ -1437,9 +1423,7 @@ button.ch-item.ch-item-encrypted .ch-badge { filter: grayscale(0.6); }
.hop-conflict-name { font-weight: 600; flex: 1; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.hop-conflict-dist { font-size: 11px; color: var(--text-muted); font-family: var(--mono); white-space: nowrap; }
.hop-conflict-pk { font-size: 10px; color: var(--text-muted); font-family: var(--mono); }
.hop-unreliable { opacity: 0.85; }
.hop-unreliable-btn { background: none; border: none; color: var(--status-yellow, #f59e0b); font-size: 13px;
cursor: help; vertical-align: middle; margin-left: 2px; padding: 0 2px; line-height: 1; }
.hop-unreliable { opacity: 0.5; text-decoration: line-through; }
.hop-global-fallback { border-bottom: 1px dashed var(--status-red); }
.hop-current { font-weight: 700 !important; color: var(--accent) !important; }
@@ -2296,7 +2280,6 @@ th.sort-active { color: var(--accent, #60a5fa); }
.skew-badge--critical { background: var(--status-orange); color: #fff; }
.skew-badge--absurd { background: var(--status-purple); color: #fff; }
.skew-badge--no_clock { background: var(--text-muted); color: #fff; }
.skew-badge--bimodal_clock { background: var(--status-amber-light); color: var(--status-amber-text); border: 1px solid var(--status-amber); }
.skew-detail-section { padding: 10px 16px; margin-bottom: 8px; }
.skew-sparkline-wrap { margin-top: 6px; }
-45
View File
@@ -1,45 +0,0 @@
# CoreScope QA artifacts
Project-specific assets for the [`qa-suite`](https://github.com/Kpa-clawbot/ai-sdlc/tree/master/skills/qa-suite) skill.
## Layout
```
qa/
├── README.md ← this file
├── plans/
│ └── <release>.md ← per-release test plans (one file per RC)
└── scripts/
└── api-contract-diff.sh ← CoreScope-tuned API contract diff
```
## How to run
```
qa staging # use the latest plans/v*-rc.md against staging
qa pr 806 # use plans/pr-806.md if it exists, else latest plans/v*-rc.md
qa v3.6.0-rc # use plans/v3.6.0-rc.md
```
The parent agent loads the qa-suite skill, which reads:
1. The plan file from `qa/plans/`
2. Bundled scripts from `qa/scripts/`
3. The reusable engine + qa-engineer persona from the skill itself
## Adding a new plan
For each release candidate, copy the latest `plans/v*-rc.md` to `plans/<new-tag>.md` and update:
- The commit-range header (`vN.M..master`)
- Any new sections for new features in the release
- The "Test data" section if new fixture types are needed
- The GO criteria (which sections are blockers)
## Adding a new script
Custom scripts go in `qa/scripts/` with `mode=auto: <script-name>` referenced from the plan. The qa-engineer subagent runs them with two args: `BASELINE_URL TARGET_URL`.
Authoring rules from the qa-suite skill:
- 4-way error classification: `curl-failed` / `parse-empty` / `shape-diff` / field-missing
- Distinguish HTTP errors from jq parse failures
- Don't silence stderr — script bugs must surface
- Exit code = number of failures
-108
View File
@@ -1,108 +0,0 @@
# Plan: v3.6.0-rc
Targets the changes between v3.5.1 and v3.6.0 candidate (~34 commits).
## Test data
The qa-engineer should pick concrete test fixtures at run time and include them in the report:
- **Pivot node pubkey**: pick the top-result from `/api/nodes?limit=20&sort=advert_count` that has `role=repeater` AND a non-zero `totalPaths` from `/api/nodes/{pk}/paths`. Used for sections 5.1, 8.1, 8.2.
- **Multi-role pubkey** (section 8.6): pick a node whose pubkey appears in BOTH `/api/observers` and `/api/nodes?role=repeater`. If none → mark 8.6 `needs-human`.
- **Sample packet hash**: take `.packets[0].hash` from `/api/packets?limit=1`. Used for sections 3.x.
- **Channel sample**: pick a channel name from `/api/channels` (if endpoint exists) or scrape `/#channels` page.
Record every fixture used in the final report so failures are reproducible.
## Sections
### 1. Memory & Load
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 1.1 | Container with **3 GB** limit starts on heaviest available DB | No OOM, steady state under limit. Note: 1 GB cap is unrealistic without `GOMEMLIMIT` and bounded cold-load — see #836 | #806/#836 | human |
| 1.2 | Hit `/debug/pprof/heap` after Load completes; run `pprof-snapshot.sh` | `unmarshalResolvedPath` absent from top-15 inuse_space; `Load()`-attributed inuse_space ≤ 250 MB on staging-sized DB (~1.5M obs); total heap < 1 GB | #806 | auto: pprof-snapshot.sh |
| 1.3 | Set tight `MaxLoadMemMB`, restart | Load stops gracefully at budget; server still serves `/api/stats` 200 | #790 | human |
| 1.4 | Watch `processRSSMB` (from `/api/stats`) vs procfs RSS over ingest+eviction cycles | `processRSSMB` tracks `cat /proc/$(pidof corescope)/status \| awk '/VmRSS/{print $2}'` (kB → MB) within ±20% across one full eviction cycle. Note: `storeDataMB` (formerly `trackedMB`) is the in-store packet byte estimate and is expected to be a **subset** of RSS, not equal to it. | #751, #832 | human |
| 1.5 | Run 30 min under live ingest | Sawtooth heap pattern (≥1 eviction-driven dip), not monotonic ramp | #806/#807 | human |
### 2. API contract
Run `scripts/api-contract-diff.sh BASELINE_URL TARGET_URL` once. Report the script's exit code; nonzero = failures.
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 2.1 | api-contract-diff baseline vs target | Exit code 0; all endpoints carry `resolved_path` where expected | #806 | auto: api-contract-diff.sh |
| 2.2 | WebSocket `/ws` carries `resolved_path` on broadcasts | Run JS hook in browser console: `(function(){let n=0,r=0; const W=WebSocket; window.WebSocket=function(...a){const s=new W(...a); s.addEventListener('message',e=>{n++; try{const m=JSON.parse(e.data); if(m && (m.resolved_path !== undefined \|\| (m.observations\|\|[]).some(o=>o.resolved_path!==undefined))) r++;}catch{}}); return s;}; window.__wsCount=()=>({n,r});})()` then navigate to `/`, wait 30s, eval `__wsCount()` — `r` must be ≥ 1 if `n` ≥ 1 | #806 | browser |
### 3. Decoder & hashing
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 3.1 | Recompute content hashes for sample of recent packets vs stored | All match (hash uses payload-type bits only) | #787 | human |
| 3.2 | Inspect a TRACE packet detail panel | path_json length matches path_sz from flags byte | #732 | browser |
| 3.3 | Check `hash_size` on transport-route packet AND zero-hop advert | Correct hash_size detected | #747 | browser |
| 3.4 | Field-table column offsets for transport-route packet | Snapshot of detail panel: each field row has nonzero `offset`/`length` cells AND offsets monotonically increase | #766 | browser |
| 3.5 | Corrupt advert ingest log check | Rejected, counted, no DB entry | #794 | human |
| 3.6 | Public channel packet rendering | No empty/garbled decode | #761 | browser |
### 4. Channels (#725)
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 4.1 | Channel list — full message history loads from DB | Past messages persist across reload | #726 | browser |
| 4.2 | Add custom channel via UI (then remove it as teardown) | Channel appears, encrypted msgs decrypt; teardown removes it cleanly. STAGING ONLY. | #733 | browser |
| 4.3 | PSK channel add + channel removal (already a self-teardown) | Both work, UI state correct after | #750 | browser |
| 4.4 | Deep link to encrypted channel without key | Lock message shows | #783 | browser |
| 4.5 | Undecryptable msgs hidden by default + toggle | Hidden default; toggle shows | #728 | browser |
| 4.6 | Add-channel button + hint + status feedback | All present | #760 | browser |
| 4.7 | Filter packets by channel | Functional: filter applies, packet count drops; performance: response time ≤ 500 ms for `/api/packets?channel=<name>&limit=100` (timed via `curl -w '%{time_total}'`) | #762/#763 | browser+auto |
### 5. Clock skew (#690)
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 5.1 | Node detail clock-skew badge + sparkline | Both render | #746/#752 | browser |
| 5.2 | Analytics fleet clock-skew page | Renders, epoch-0 filtered | #769 | browser |
| 5.3 | Outlier sample doesn't poison median | Sanity caps respected; severity uses `recentMedianSkewSec` (#789), not all-time `medianSkewSec` | #769/#789 | human |
| 5.4 | Roles page clock-skew indicator | Renders | #752 | browser |
### 6. Observers
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 6.1 | Observer with no packets in N days disappears after retention sweep | Removed | #764 | human |
| 6.2 | Analytics observer-graph (M1+M2) | Renders (`#observerGraph` element present at `public/analytics.js:2048-2051`) | #774 | browser |
### 7. Multi-byte hash adopters
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 7.1 | Hash Usage Matrix collision details for all hash sizes | Click cell → colliding pubkeys shown | #758 | browser |
| 7.2 | Multi-byte adopter table includes all node types | Repeaters, room servers, sensors all present | #767 | browser |
| 7.3 | Role column reflects multi-byte adoption + advert precedence | For 3 sample multi-byte adopter pubkeys (from #758 matrix), the Role column on `/#nodes` matches the role inferred from their latest advert flags via `/api/nodes/{pk}/health` | #767 | browser |
### 8. Frontend nav & deep linking
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 8.1 | Click node on map/list — URL hash updates, panel opens | Hash matches | #739 | browser |
| 8.2 | Open saved deep-link to a node | Full-screen detail view opens (post-#823: desktop deep links match the Details-link path) | #739/#823 | browser |
| 8.3 | Packets page filter URL hash | Reload preserves filters | #740 | browser |
| 8.4 | Details/Analytics links in node detail panel | Navigate without router glitch | #779/#785 | browser |
| 8.5 | Neighbor graph slider | Persists across reloads, default 0.7 | #776 | browser |
| 8.6 | Repeater that's also observer | Single map marker | #745 | browser |
| 8.7 | Side-panel "Recent Packets" — click any entry, lands on packet detail (no 404), entry text is readable in current theme | DB-fallback works (#827); `.advert-info` has explicit color (#829) | #827/#829 | browser |
### 9. Geofilter & customizer
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 9.1 | Customize → "Open geofilter builder" link | Opens app-served builder | #735 | browser |
| 9.2 | Build a filter, save, reload (STAGING ONLY; teardown: delete the saved filter) | Persists across reload; teardown removes it | #735 | browser |
| 9.3 | Geofilter docs page | Renders, content matches behavior | #734 | browser |
### 10. Node blacklist
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 10.1 | Add node pubkey to nodeBlacklist config; restart | Hidden from listings/map/neighbor graph | #742 | auto: blacklist-test.sh |
| 10.2 | Packets still in DB | Yes (filter not delete) | #742 | auto: blacklist-test.sh |
`blacklist-test.sh` covers both 10.1 and 10.2 in one run. Required env: `TEST_NODE_PUBKEY` (hex, of a real visible node on TARGET), `TARGET_SSH_HOST`, `TARGET_CONFIG_PATH`, `TARGET_CONTAINER`. Optional: `TARGET_DB_PATH` or `ADMIN_API_TOKEN` for §10.2 probe; `TARGET_SSH_KEY` (default `/root/.ssh/id_ed25519`). Mandatory teardown removes the pubkey and verifies the node returns to listings.
### 11. Deploy/ops
| # | Step | Pass criteria | Source | Mode |
|---|---|---|---|---|
| 11.1 | Force-redeploy staging | Container removed cleanly even if `docker run`, not compose. Playwright E2E `Desktop: deep link #/nodes/{pubkey} opens full-screen detail view` passes (updated #833 — was asserting old pre-#823 split-panel behavior). | fa348ef/#833 | human |
## GO criteria
- Sections 1.2, 2, 3 must all pass — release blockers
- Section 4 (channels) — any visible regression must be fixed before tag
- Other sections: file follow-up issues; decide per-item whether to tag with known issues
-134
View File
@@ -1,134 +0,0 @@
#!/usr/bin/env bash
# api-contract-diff.sh — diff CoreScope API endpoints between two deployments.
# Usage: api-contract-diff.sh BASELINE_URL TARGET_URL [-k AUTH_HEADER]
#
# Compares JSON shape (recursive key set) per endpoint and asserts presence of
# `resolved_path` where contract requires it. Prints a per-endpoint result line
# (✅/❌) and a summary. Exit code = number of failures.
#
# Distinguishes:
# curl-failed → HTTP error or network timeout (real outage)
# parse-empty → curl succeeded but response shape unexpected (probable
# contract drift in this script or in the API)
# shape-diff → recursive key set differs between baseline and target
# rp-missing → resolved_path absent on target where it was promised
#
# PUBLIC repo: do not commit URLs or keys here. Caller passes them.
# -u: unset vars are errors; pipefail: a failing pipe stage fails the pipe.
# (no -e: per-endpoint failures are counted, not fatal)
set -uo pipefail
# Positional args: baseline and target deployment base URLs.
OLD="${1:-}"; NEW="${2:-}"
[[ -z "$OLD" || -z "$NEW" ]] && { echo "usage: $0 BASELINE_URL TARGET_URL [-k AUTH_HEADER]" >&2; exit 2; }
shift 2 || true
# Optional: -k "Header: value" is forwarded verbatim to every curl call.
AUTH=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    -k) AUTH="$2"; shift 2 ;;
    *) echo "unknown arg: $1" >&2; exit 2 ;;
  esac
done
# Scratch dir for fetched JSON bodies; cleaned up on any exit path.
TMP=$(mktemp -d); trap 'rm -rf "$TMP"' EXIT
# fetch URL OUTFILE — download the body of URL into OUTFILE.
# Succeeds only on a 2xx status; network failure or timeout is treated
# as status "000". On failure, prints the HTTP status and returns 1.
fetch() {
  local url="$1" out="$2" status
  status=$(curl -s -m 30 -o "$out" -w "%{http_code}" ${AUTH:+-H "$AUTH"} "$url" 2>/dev/null) || status="000"
  case "$status" in
    2*) return 0 ;;
    *)  echo " HTTP $status"; return 1 ;;
  esac
}
# Seed lookups from TARGET (so the picked IDs are guaranteed present there).
seed_packets="$TMP/seed_packets.json"
seed_observers="$TMP/seed_observers.json"
seed_nodes="$TMP/seed_nodes.json"
# Seed failures are warnings, not fatal: the dependent endpoints are skipped.
if ! fetch "$NEW/api/packets?limit=1" "$seed_packets"; then echo "seed /api/packets failed" >&2; fi
if ! fetch "$NEW/api/observers" "$seed_observers"; then echo "seed /api/observers failed" >&2; fi
if ! fetch "$NEW/api/nodes?limit=1" "$seed_nodes"; then echo "seed /api/nodes failed" >&2; fi
# `// empty` yields "" for missing fields; `|| true` tolerates absent/invalid
# seed files so an earlier fetch failure doesn't abort the whole run.
HASH=$(jq -r '.packets[0].hash // empty' "$seed_packets" 2>/dev/null || true)
OBSID=$(jq -r '.observers[0].id // empty' "$seed_observers" 2>/dev/null || true)
NODEPK=$(jq -r '.nodes[0].public_key // empty' "$seed_nodes" 2>/dev/null || true)
[[ -z "$HASH" ]] && echo "warn: no packet hash from /api/packets — packet-detail endpoints will be skipped" >&2
[[ -z "$OBSID" ]] && echo "warn: no observer id from /api/observers — observer-detail endpoints will be skipped" >&2
[[ -z "$NODEPK" ]] && echo "warn: no node pubkey from /api/nodes — node-detail endpoints will be skipped" >&2
# Endpoints to diff: path | jq filter (selects subobject to compare) | RP-required(yes/no)
declare -a ENDPOINTS
ENDPOINTS+=("/api/packets?limit=20|.packets[0]|yes")
ENDPOINTS+=("/api/packets?limit=20&expandObservations=true|.packets[0]|yes")
ENDPOINTS+=("/api/observers|.observers[0]|no")
# ID-dependent endpoints only when the seed lookups produced a fixture.
[[ -n "$HASH" ]] && ENDPOINTS+=("/api/packets/$HASH|.|yes")
[[ -n "$OBSID" ]] && ENDPOINTS+=("/api/observers/$OBSID|.|no")
[[ -n "$OBSID" ]] && ENDPOINTS+=("/api/observers/$OBSID/analytics|.|no")
[[ -n "$NODEPK" ]] && ENDPOINTS+=("/api/nodes/$NODEPK/health|.recentPackets[0]|yes")
[[ -n "$NODEPK" ]] && ENDPOINTS+=("/api/nodes/$NODEPK/paths|.|no")
# Strip volatile fields (timestamps + counters) from a JSON value.
# Applied recursively via jq walk() so nested objects are stripped too.
STRIP='walk(if type=="object" then del(.timestamp, .first_seen, .last_seen, .last_heard, .updated_at, .server_time, .packet_count, .packetsLastHour, .uptime_secs, .battery_mv, .noise_floor, .observation_count, .advert_count) else . end)'
# Main loop: per endpoint, fetch both sides, select+strip, compare key sets.
# Exit code = number of failing endpoints.
fails=0
for ep in "${ENDPOINTS[@]}"; do
  # Unpack the "path|filter|rp" triple defined above.
  IFS='|' read -r path filter need_rp <<<"$ep"
  echo "=== $path (resolved_path required: $need_rp) ==="
  oldfile="$TMP/old.json"; newfile="$TMP/new.json"
  if ! fetch "$OLD$path" "$oldfile"; then echo " ❌ baseline curl-failed"; fails=$((fails+1)); continue; fi
  if ! fetch "$NEW$path" "$newfile"; then echo " ❌ target curl-failed"; fails=$((fails+1)); continue; fi
  # Selector + strip on each side. jq stderr is preserved so script bugs surface.
  # rc captured immediately after each jq — a later command would clobber $?.
  oldj=$(jq "$filter | $STRIP" "$oldfile")
  jq_old_rc=$?
  newj=$(jq "$filter | $STRIP" "$newfile")
  jq_new_rc=$?
  if [[ $jq_old_rc -ne 0 ]]; then
    echo " ❌ baseline jq-error (filter='$filter') — likely script bug or API shape changed"
    fails=$((fails+1)); continue
  fi
  if [[ $jq_new_rc -ne 0 ]]; then
    echo " ❌ target jq-error (filter='$filter') — likely script bug or API shape changed"
    fails=$((fails+1)); continue
  fi
  # jq succeeded but selected nothing — classify separately from curl failures.
  if [[ -z "$oldj" || "$oldj" == "null" ]]; then
    echo " ❌ baseline parse-empty (filter returned empty/null; check API shape)"
    fails=$((fails+1)); continue
  fi
  if [[ -z "$newj" || "$newj" == "null" ]]; then
    echo " ❌ target parse-empty (filter returned empty/null; check API shape)"
    fails=$((fails+1)); continue
  fi
  # Recursive key-set diff. Canonicalize array indices (numbers) → "[]" so two
  # different sample responses with different array lengths don't false-positive.
  KEYS_FILTER='[paths(scalars or type=="null" or (type=="array" and length==0) or (type=="object" and length==0)) | map(if type=="number" then "[]" else . end) | join(".")] | unique | .[]'
  oldkeys=$(echo "$oldj" | jq -r "$KEYS_FILTER" | sort -u)
  newkeys=$(echo "$newj" | jq -r "$KEYS_FILTER" | sort -u)
  if ! diff <(echo "$oldkeys") <(echo "$newkeys") >/dev/null; then
    echo " ❌ shape-diff (key set differs):"
    # Second diff re-runs to print the actual delta, indented for readability.
    diff <(echo "$oldkeys") <(echo "$newkeys") | sed 's/^/ /'
    fails=$((fails+1))
    continue
  fi
  # If RP expected, assert present on target (any value, may be null).
  if [[ "$need_rp" == "yes" ]]; then
    if ! echo "$newj" | jq -e '.. | objects | select(has("resolved_path")) | .resolved_path' >/dev/null 2>&1; then
      echo " ❌ rp-missing (resolved_path not present anywhere in selector)"
      fails=$((fails+1))
      continue
    fi
  fi
  echo " ✅ ok"
done
echo
echo "failures: $fails / ${#ENDPOINTS[@]}"
# Exit code contract: number of failures (0 = clean pass).
exit $fails
-271
View File
@@ -1,271 +0,0 @@
#!/usr/bin/env bash
# blacklist-test.sh — verify nodeBlacklist hides a pubkey from API surface
# while retaining its packets in the DB. Implements QA plan §10.1 + §10.2.
#
# Usage:
# blacklist-test.sh BASELINE_URL TARGET_URL
#
# BASELINE_URL is currently unused for assertions but kept as a positional
# arg for parity with other qa-suite scripts (always called with two URLs).
#
# Required env (target host control + test data):
# TEST_NODE_PUBKEY — hex pubkey of a real, currently-visible node on TARGET_URL
# TARGET_SSH_HOST — e.g. runner@example
# TARGET_SSH_KEY — path to ssh private key (default: /root/.ssh/id_ed25519)
# TARGET_CONFIG_PATH — absolute path to config.json on the target
# TARGET_CONTAINER — docker container name on the target
# Optional env:
# TARGET_DB_PATH — sqlite db path on the target (for §10.2 sqlite probe)
# ADMIN_API_TOKEN — if /api/admin/transmissions exists, use it instead of ssh+sqlite
# (read from env, not argv — never appears in ps)
# CURL_TIMEOUT — per-request curl timeout, seconds (default 60)
# RESTART_WAIT_S — max wait for /api/stats after restart (default 120)
#
# Distinguishes:
# ssh-failed → cannot reach/control target
# restart-stuck → /api/stats not 200 within RESTART_WAIT_S
# hide-failed → blacklisted pubkey still surfaced via API (§10.1 fail)
# retain-failed → blacklisted pubkey absent from DB (§10.2 fail)
# teardown-failed→ post-test removal did not restore listing
#
# Exit code = number of failures (0 = pass).
# PUBLIC repo: zero PII — no real pubkeys, IPs, or hostnames as defaults.
# NOTE: -u/-o pipefail but deliberately NOT -e — failures are counted in
# $fails and reported at the end rather than aborting the run.
set -uo pipefail
BASELINE_URL="${1:-}"
TARGET_URL="${2:-}"
if [[ -z "$BASELINE_URL" || -z "$TARGET_URL" ]]; then
echo "usage: $0 BASELINE_URL TARGET_URL (TEST_NODE_PUBKEY+TARGET_* via env)" >&2
exit 2
fi
# Test data and target-control parameters come from the environment (never
# argv) — see the header comment for each variable's meaning.
TEST_PUBKEY="${TEST_NODE_PUBKEY:-}"
TARGET_SSH_HOST="${TARGET_SSH_HOST:-}"
TARGET_SSH_KEY="${TARGET_SSH_KEY:-/root/.ssh/id_ed25519}"
TARGET_CONFIG_PATH="${TARGET_CONFIG_PATH:-}"
TARGET_CONTAINER="${TARGET_CONTAINER:-}"
TARGET_DB_PATH="${TARGET_DB_PATH:-}"
ADMIN_API_TOKEN="${ADMIN_API_TOKEN:-}"
if [[ -z "$TEST_PUBKEY" || -z "$TARGET_SSH_HOST" || -z "$TARGET_CONFIG_PATH" || -z "$TARGET_CONTAINER" ]]; then
echo "error: TEST_NODE_PUBKEY, TARGET_SSH_HOST, TARGET_CONFIG_PATH, TARGET_CONTAINER are required" >&2
exit 2
fi
# Hard input validation — these strings are interpolated into remote shell/SQL.
# Pubkey must be hex (MeshCore pubkeys are hex-encoded ed25519 prefixes).
if ! [[ "$TEST_PUBKEY" =~ ^[0-9a-fA-F]+$ ]]; then
echo "error: TEST_NODE_PUBKEY must be hex (got: redacted)" >&2
exit 2
fi
# Container name must match docker's allowed chars: [a-zA-Z0-9][a-zA-Z0-9_.-]*
if ! [[ "$TARGET_CONTAINER" =~ ^[a-zA-Z0-9][a-zA-Z0-9_.-]*$ ]]; then
echo "error: TARGET_CONTAINER has illegal chars" >&2
exit 2
fi
# Config path must be an absolute, sane path (no spaces, quotes, $, ;, etc.).
if ! [[ "$TARGET_CONFIG_PATH" =~ ^/[A-Za-z0-9_./-]+$ ]]; then
echo "error: TARGET_CONFIG_PATH must be a sane absolute path" >&2
exit 2
fi
# DB path is optional (only needed for the sqlite probe) but validated when set.
if [[ -n "$TARGET_DB_PATH" ]] && ! [[ "$TARGET_DB_PATH" =~ ^/[A-Za-z0-9_./-]+$ ]]; then
echo "error: TARGET_DB_PATH must be a sane absolute path" >&2
exit 2
fi
# Tunables (seconds).
CURL_TIMEOUT="${CURL_TIMEOUT:-60}"
RESTART_WAIT_S="${RESTART_WAIT_S:-120}"
# BatchMode=yes: fail instead of prompting for a password in CI.
SSH_OPTS=(-i "$TARGET_SSH_KEY" -o StrictHostKeyChecking=accept-new -o ConnectTimeout=15 -o BatchMode=yes)
# ssh_t — run a command on the target host with the standard options.
ssh_t() { ssh "${SSH_OPTS[@]}" "$TARGET_SSH_HOST" "$@"; }
TMP=$(mktemp -d)
fails=0
TEARDOWN_DONE=0
# -----------------------------------------------------------------------------
# Teardown — MANDATORY in all exit paths.
# -----------------------------------------------------------------------------
# Runs once via the EXIT/INT/TERM trap below: removes the test pubkey from the
# blacklist, restarts the target, and verifies the node is visible again.
# Failures here are ADDED to the incoming exit status so teardown problems
# are never silently swallowed.
teardown() {
local rc=$?
# Re-entrancy guard: a second invocation (e.g. INT then EXIT) only cleans $TMP.
if [[ "$TEARDOWN_DONE" == "1" ]]; then rm -rf "$TMP"; exit "$rc"; fi
TEARDOWN_DONE=1
echo "=== teardown: removing $TEST_PUBKEY from nodeBlacklist ==="
if remove_from_blacklist && restart_target && wait_for_stats; then
if node_visible; then
echo " ✅ teardown ok — node returned to listings"
else
echo " ❌ teardown-failed: node still hidden after removal"
rc=$((rc + 1))
fi
else
echo " ❌ teardown-failed: could not restore config / restart / stats"
rc=$((rc + 1))
fi
rm -rf "$TMP"
exit "$rc"
}
trap teardown EXIT INT TERM
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
# fetch_code URL OUTFILE — GET the URL, write the body to OUTFILE, and print
# the HTTP status code on stdout ("000" when curl itself fails to run).
fetch_code() {
  local target="$1"
  local body_file="$2"
  curl -s -m "$CURL_TIMEOUT" -o "$body_file" -w "%{http_code}" "$target" 2>/dev/null || echo "000"
}
# wait_for_stats — poll $TARGET_URL/api/stats every 3s until it answers
# HTTP 200, giving up after RESTART_WAIT_S seconds.
# Returns 0 on success, 1 on timeout (reported as "restart-stuck").
wait_for_stats() {
  local give_up_at
  local http
  echo " waiting up to ${RESTART_WAIT_S}s for $TARGET_URL/api/stats ..."
  give_up_at=$(( $(date +%s) + RESTART_WAIT_S ))
  until (( $(date +%s) >= give_up_at )); do
    http=$(fetch_code "$TARGET_URL/api/stats" "$TMP/stats.json")
    [[ "$http" == "200" ]] && { echo " stats OK"; return 0; }
    sleep 3
  done
  echo " ❌ restart-stuck: /api/stats never returned 200"
  return 1
}
# restart_target — bounce the app container on the target host over ssh.
# Returns 0 when `docker restart` succeeds, 1 (ssh-failed) otherwise.
restart_target() {
  echo " restarting container $TARGET_CONTAINER ..."
  # TARGET_CONTAINER is validated above; still quote defensively.
  ssh_t "docker restart $(printf %q "$TARGET_CONTAINER")" >/dev/null && return 0
  echo " ❌ ssh-failed: docker restart failed"
  return 1
}
# Mutate config.json on target. Values pass via env (printf %q + single-quoted
# heredoc) so $TEST_PUBKEY etc. never enter the remote shell as code.
# mode=add appends the pubkey to .nodeBlacklist (deduped); mode=remove drops it.
# Prefers jq on the remote host, falls back to python3. Returns 0/1.
set_blacklist_state() {
local mode="$1" # add | remove
ssh_t "CFG=$(printf %q "$TARGET_CONFIG_PATH") PK=$(printf %q "$TEST_PUBKEY") MODE=$(printf %q "$mode") bash -s" <<'REMOTE'
set -euo pipefail
# Edit into a temp file, then atomically replace the config (mv below).
TMP="$(mktemp)"
trap 'rm -f "$TMP"' EXIT
if command -v jq >/dev/null; then
if [ "$MODE" = "add" ]; then
jq --arg pk "$PK" '.nodeBlacklist = ((.nodeBlacklist // []) + [$pk] | unique)' "$CFG" > "$TMP"
else
jq --arg pk "$PK" '.nodeBlacklist = ((.nodeBlacklist // []) - [$pk])' "$CFG" > "$TMP"
fi
else
# Fallback when jq is absent; same add/remove semantics via python3.
# NOTE(review): indentation inside the PY heredoc appears stripped in this
# render — confirm the original file preserves valid Python indentation.
python3 - "$CFG" "$PK" "$MODE" "$TMP" <<'PY'
import json, sys
cfg, pk, mode, out = sys.argv[1:]
with open(cfg) as f: d = json.load(f)
bl = list(dict.fromkeys(d.get("nodeBlacklist") or []))
if mode == "add":
if pk not in bl: bl.append(pk)
else:
bl = [x for x in bl if x != pk]
d["nodeBlacklist"] = bl
with open(out, "w") as f: json.dump(d, f, indent=2)
PY
fi
# Preserve mode and ownership; mv across same FS is atomic.
chmod --reference="$CFG" "$TMP" 2>/dev/null || true
chown --reference="$CFG" "$TMP" 2>/dev/null || true
mv "$TMP" "$CFG"
trap - EXIT
REMOTE
local rc=$?
if (( rc != 0 )); then
echo " ❌ ssh-failed: could not edit $TARGET_CONFIG_PATH ($mode)"
return 1
fi
return 0
}
# Thin wrappers over set_blacklist_state so call sites read declaratively.
add_to_blacklist() { set_blacklist_state add; }
remove_from_blacklist() { set_blacklist_state remove; }
# node_visible — exit 0 when TEST_PUBKEY is still surfaced by the target API:
# either the per-node detail endpoint answers 200, or the quoted pubkey string
# appears in the full node listing. Exit 1 when hidden on both surfaces.
node_visible() {
  local detail_code
  detail_code=$(fetch_code "$TARGET_URL/api/nodes/$TEST_PUBKEY" "$TMP/node.json")
  [[ "$detail_code" == "200" ]] && return 0
  # Detail endpoint didn't confirm — fall back to scanning the listing.
  fetch_code "$TARGET_URL/api/nodes?limit=10000" "$TMP/nodes.json" >/dev/null
  if grep -qF -- "\"$TEST_PUBKEY\"" "$TMP/nodes.json" 2>/dev/null; then
    return 0
  fi
  return 1
}
# -----------------------------------------------------------------------------
# §10.1 — hide
# -----------------------------------------------------------------------------
echo "=== §10.1 add $TEST_PUBKEY to nodeBlacklist ==="
# Any exit here still runs teardown via the EXIT trap installed above.
if ! add_to_blacklist; then fails=$((fails+1)); exit "$fails"; fi
if ! restart_target; then fails=$((fails+1)); exit "$fails"; fi
if ! wait_for_stats; then fails=$((fails+1)); exit "$fails"; fi
detail_code=$(fetch_code "$TARGET_URL/api/nodes/$TEST_PUBKEY" "$TMP/detail.json")
list_code=$(fetch_code "$TARGET_URL/api/nodes?limit=10000" "$TMP/list.json")
in_list=0
if [[ "$list_code" == "200" ]] && grep -qF -- "\"$TEST_PUBKEY\"" "$TMP/list.json"; then
in_list=1
fi
# NOTE(review): '||' means the check passes when EITHER surface hides the
# node; if the intent is that BOTH must hide it, this should be '&&' — confirm.
if [[ "$detail_code" == "404" || "$in_list" == "0" ]]; then
echo " ✅ hide ok: detail=$detail_code in_list=$in_list"
else
echo " ❌ hide-failed: detail=$detail_code in_list=$in_list — pubkey still surfaced"
fails=$((fails+1))
fi
# Topology is best-effort: a non-200 skips the assertion rather than failing.
topo_code=$(fetch_code "$TARGET_URL/api/topology" "$TMP/topo.json")
if [[ "$topo_code" != "200" ]]; then
echo " ⚠️ /api/topology HTTP $topo_code — skipping topology assertion"
elif grep -qF -- "$TEST_PUBKEY" "$TMP/topo.json"; then
echo " ❌ hide-failed: /api/topology references blacklisted pubkey"
fails=$((fails+1))
else
echo " ✅ topology clean"
fi
# -----------------------------------------------------------------------------
# §10.2 — DB retain
# -----------------------------------------------------------------------------
# Verify the blacklisted node's packets are still stored: try the admin API
# first (if a token is provided), then fall back to ssh + sqlite3.
echo "=== §10.2 verify packets retained in DB ==="
count=""
if [[ -n "$ADMIN_API_TOKEN" ]]; then
# Read auth header from stdin so the token never enters argv (ps-safe).
code=$(printf 'header = "Authorization: Bearer %s"\n' "$ADMIN_API_TOKEN" | \
curl -s -m "$CURL_TIMEOUT" -K - -o "$TMP/admin.json" -w "%{http_code}" \
"$TARGET_URL/api/admin/transmissions?from_node=$TEST_PUBKEY&count=1" 2>/dev/null || echo "000")
if [[ "$code" == "200" ]]; then
# Prefer an explicit .count field; otherwise count the returned rows.
count=$(jq -r '.count // ((.transmissions // []) | length)' "$TMP/admin.json" 2>/dev/null || echo "")
fi
fi
if [[ -z "$count" ]]; then
if [[ -z "$TARGET_DB_PATH" ]]; then
echo " ❌ retain-failed: TARGET_DB_PATH unset and no ADMIN_API_TOKEN — cannot probe"
fails=$((fails+1))
else
# TEST_PUBKEY is hex-validated → safe to inline single-quoted in SQL.
# Container/db path also validated; printf %q for defense in depth.
q="SELECT COUNT(*) FROM transmissions WHERE from_node = '$TEST_PUBKEY';"
qq=$(printf %q "$q")
# Try sqlite3 inside the container first, then directly on the host.
if ! count=$(ssh_t "docker exec $(printf %q "$TARGET_CONTAINER") sqlite3 $(printf %q "$TARGET_DB_PATH") $qq" 2>/dev/null); then
count=$(ssh_t "sqlite3 $(printf %q "$TARGET_DB_PATH") $qq" 2>/dev/null || echo "")
fi
fi
fi
# $count must be a positive integer for the retain assertion to pass.
if [[ -z "$count" ]]; then
echo " ❌ retain-failed: could not read transmissions count"
fails=$((fails+1))
elif [[ "$count" =~ ^[0-9]+$ ]] && (( count > 0 )); then
echo " ✅ DB retains $count packets from $TEST_PUBKEY"
else
echo " ❌ retain-failed: count=$count (expected > 0)"
fails=$((fails+1))
fi
echo "=== summary: $fails failure(s) before teardown ==="
# trap handles teardown + exit
exit "$fails"
+7 -343
View File
@@ -15,11 +15,6 @@ async function test(name, fn) {
results.push({ name, pass: true });
console.log(` \u2705 ${name}`);
} catch (err) {
if (err.skip) {
results.push({ name, pass: true, skipped: true });
console.log(`${name}: ${err.message}`);
return;
}
results.push({ name, pass: false, error: err.message });
console.log(` \u274c ${name}: ${err.message}`);
console.log(`\nFail-fast: stopping after first failure.`);
@@ -1706,18 +1701,18 @@ async function run() {
assert(!url.includes('node-fullscreen') || await page.$('#nodesRight:not(.empty)'), 'Split panel should be visible on desktop');
});
// Test: loading #/nodes/{pubkey} on desktop opens full-screen detail view (#823)
// Updated from #676's earlier "split panel on desktop" assertion. The Details
// link now opens the full-screen single-node view on desktop too — see PR #824.
await test('Desktop: deep link #/nodes/{pubkey} opens full-screen detail view', async () => {
// Test: loading #/nodes/{pubkey} on desktop shows split panel (#676)
await test('Desktop: deep link #/nodes/{pubkey} opens split panel, not full-screen', async () => {
await page.setViewportSize({ width: 1280, height: 800 });
await page.goto(BASE + '#/nodes', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('#nodesBody tr[data-key]', { timeout: 10000 });
const pubkey = await page.$eval('#nodesBody tr[data-key]', el => el.dataset.key);
await page.goto(BASE + '#/nodes/' + encodeURIComponent(pubkey), { waitUntil: 'domcontentloaded' });
await page.waitForTimeout(500);
const hasSplitPanel = await page.$('#nodesRight:not(.empty)');
const hasFullScreen = await page.$('.node-fullscreen');
assert(hasFullScreen, 'Full-screen detail view should be open on desktop deep link (#823)');
assert(hasSplitPanel, 'Split panel should be open on desktop deep link');
assert(!hasFullScreen, 'Full-screen view should NOT appear on desktop deep link');
});
// Test: packets timeWindow deep link
@@ -1783,343 +1778,12 @@ async function run() {
}
});
// Test: Expanded group children have unique observation ids (#866)
// Expands a grouped packet row and asserts that clicking different children
// drives distinct ?obs= ids (and that they aren't the aggregate packet id).
await test('Expanded group children update detail pane per-observation', async () => {
await page.goto(`${BASE}/#/packets`, { waitUntil: 'domcontentloaded' });
// Ensure grouped mode and wide time window
// (525600 ≈ one year if the unit is minutes — presumably the widest window; confirm unit)
await page.evaluate(() => {
localStorage.setItem('meshcore-time-window', '525600');
localStorage.setItem('meshcore-groupbyhash', 'true');
});
await page.reload({ waitUntil: 'load' });
await page.waitForSelector('table tbody tr', { timeout: 15000 });
// Find a group row with observation_count > 1 (has expand button)
const expandBtn = await page.$('table tbody tr .expand-btn, table tbody tr [data-expand]');
if (!expandBtn) {
// NOTE(review): the glyph in this log looks mis-encoded (bare U+FE0F) —
// probably meant ⚠️; confirm against the original file.
console.log(' ️ No expandable groups found — skipping child assertion');
return;
}
// Click expand and wait for the /packets/<hash> detail API call
const [detailResp] = await Promise.all([
page.waitForResponse(resp => {
const u = new URL(resp.url(), BASE);
// Match /api/packets/<hash> but not /api/packets?... or /api/packets/observations
return /\/api\/packets\/[A-Fa-f0-9]+$/.test(u.pathname) && resp.status() === 200;
}, { timeout: 15000 }),
expandBtn.click(),
]);
assert(detailResp, 'Expected /api/packets/<hash> response on expand');
// Wait for child rows to appear
await page.waitForSelector('table tbody tr.child-row, table tbody tr[class*="child"]', { timeout: 5000 });
const childRows = await page.$$('table tbody tr.child-row, table tbody tr[class*="child"]');
if (childRows.length < 2) {
console.log(' ️ Group has < 2 children — skipping per-observation assertion');
return;
}
// Click first child row
await childRows[0].click();
await page.waitForFunction(() => {
const panel = document.getElementById('pktRight');
return panel && !panel.classList.contains('empty') && panel.textContent.trim().length > 0;
}, { timeout: 10000 });
// NOTE(review): content1/content2 are captured but never asserted on —
// only the URLs are compared below; dead captures could be removed.
const content1 = await page.$eval('#pktRight', el => el.textContent.trim());
const url1 = page.url();
// Click second child row
await childRows[1].click();
await page.waitForTimeout(500);
const content2 = await page.$eval('#pktRight', el => el.textContent.trim());
const url2 = page.url();
// URL should contain ?obs= with a real observation id
assert(url1.includes('obs=') || url2.includes('obs='), `URL should contain obs= parameter, got: ${url1}`);
// The two children should show different detail pane content (different observers)
// At minimum, the URL obs= values should differ
if (url1.includes('obs=') && url2.includes('obs=')) {
const obs1 = new URL(url1).hash.match(/obs=(\d+)/)?.[1];
const obs2 = new URL(url2).hash.match(/obs=(\d+)/)?.[1];
if (obs1 && obs2) {
assert(obs1 !== obs2, `Two children should have different obs ids, both got obs=${obs1}`);
}
}
// Verify obs id is NOT the aggregate packet id (the bug from #866)
const obsMatch = url2.match(/obs=(\d+)/);
if (obsMatch) {
const detailJson = await detailResp.json().catch(() => null);
if (detailJson?.packet?.id) {
const aggId = String(detailJson.packet.id);
// At least one child obs id should differ from the aggregate packet id
const obs1 = url1.match(/obs=(\d+)/)?.[1];
const obs2 = url2.match(/obs=(\d+)/)?.[1];
const allSameAsAgg = obs1 === aggId && obs2 === aggId;
assert(!allSameAsAgg, `Child obs ids should not all equal aggregate packet.id (${aggId})`);
}
}
});
// Test: per-observation raw_hex — hex pane updates when switching observations (#881)
// NOTE: this test only logs its outcome in every branch past the selectors —
// it can fail solely on navigation/selector timeouts, never on the hex check.
await test('Packet detail hex pane updates per observation', async () => {
await page.goto(BASE + '#/packets', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('table tbody tr', { timeout: 15000 });
await page.waitForTimeout(500);
// Try clicking packet rows to find one with multiple observations
const rows = await page.$$('table tbody tr[data-action]');
let obsRows = [];
for (let i = 0; i < Math.min(rows.length, 10); i++) {
// Click failures are swallowed so a stale row doesn't abort the scan.
await rows[i].click({ timeout: 3000 }).catch(() => null);
await page.waitForTimeout(600);
obsRows = await page.$$('.detail-obs-row');
if (obsRows.length >= 2) break;
}
if (obsRows.length < 2) {
console.log(' ⏭ Skipped: no packet with ≥2 observations found in first 10 rows');
return;
}
// Click first observation, capture hex dump
await obsRows[0].click({ timeout: 5000 });
await page.waitForTimeout(500);
const hex1 = await page.$eval('.hex-dump', el => el.textContent).catch(() => '');
// Click second observation, capture hex dump
await obsRows[1].click({ timeout: 5000 });
await page.waitForTimeout(500);
const hex2 = await page.$eval('.hex-dump', el => el.textContent).catch(() => '');
// If both have content and differ, the feature works
if (hex1 && hex2 && hex1 !== hex2) {
console.log(' ✓ Hex pane content differs between observations');
} else if (hex1 && hex2 && hex1 === hex2) {
console.log(' ⏭ Hex same for both observations (likely historical NULL raw_hex — OK)');
} else {
console.log(' ⏭ Could not capture hex content from both observations');
}
});
// Test: path pill (top) and byte breakdown (bottom) agree on hop count
// Regression for visual mismatch where badge said "1 hop" but path text listed N names
await test('Packet detail path pill and byte breakdown agree on hop count', async () => {
await page.goto(BASE + '#/packets', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('table tbody tr', { timeout: 15000 });
await page.waitForTimeout(500);
// Click rows until we find one whose detail pane renders a multi-hop path
const rows = await page.$$('table tbody tr[data-action]');
let found = false;
for (let i = 0; i < Math.min(rows.length, 15); i++) {
await rows[i].click({ timeout: 3000 }).catch(() => null);
await page.waitForTimeout(500);
// Scrape both hop-count renderings from the live DOM in one evaluate().
const result = await page.evaluate(() => {
// Path pill: <dt>Path</dt><dd><span class="badge ...">N hops</span> ...names...</dd>
const dts = document.querySelectorAll('dl.detail-meta dt');
let pillBadgeCount = null;
let pillNameCount = null;
for (const dt of dts) {
if (dt.textContent.trim() === 'Path') {
const dd = dt.nextElementSibling;
if (!dd) break;
const badge = dd.querySelector('.badge');
if (badge) {
const m = badge.textContent.match(/(\d+)\s*hop/);
if (m) pillBadgeCount = parseInt(m[1], 10);
}
// Count rendered hop links/spans (HopDisplay.renderHop output)
const hops = dd.querySelectorAll('.hop-link, [data-hop-link], .hop-named, .hop-anonymous');
pillNameCount = hops.length;
break;
}
}
// Byte breakdown: section row "Path (N hops)" + N "Hop X — ..." rows
let breakdownSectionCount = null;
let breakdownRowCount = 0;
const fieldTable = document.querySelector('table.field-table');
if (fieldTable) {
for (const tr of fieldTable.querySelectorAll('tr')) {
const txt = tr.textContent.trim();
const sec = txt.match(/^Path\s*\((\d+)\s*hops?\)/);
if (sec) breakdownSectionCount = parseInt(sec[1], 10);
// Second alternative tolerates a leading row-number cell fused into txt.
if (/^\s*\d+\s*Hop\s+\d+\s*—/.test(txt) || /^Hop\s+\d+\s*—/.test(txt.replace(/^\d+/, '').trim())) {
breakdownRowCount++;
}
}
}
return { pillBadgeCount, pillNameCount, breakdownSectionCount, breakdownRowCount };
});
if (result.pillBadgeCount && result.pillBadgeCount > 0 && result.breakdownSectionCount != null) {
found = true;
// Top badge count must equal bottom section count
assert(result.pillBadgeCount === result.breakdownSectionCount,
`Path pill badge says ${result.pillBadgeCount} hops but byte breakdown says ${result.breakdownSectionCount} hops`);
// Number of rendered hop names in pill should also match (within 1, since renderPath may add separators)
if (result.pillNameCount != null && result.pillNameCount > 0) {
assert(Math.abs(result.pillNameCount - result.pillBadgeCount) <= 1,
`Path pill badge ${result.pillBadgeCount} but rendered ${result.pillNameCount} hop names`);
}
// And breakdown rendered rows should match its own section count
assert(result.breakdownRowCount > 0,
'breakdown rows selector matched nothing — selector or DOM changed');
assert(result.breakdownRowCount === result.breakdownSectionCount,
`Byte breakdown section says ${result.breakdownSectionCount} hops but rendered ${result.breakdownRowCount} hop rows`);
console.log(` ✓ Path pill (${result.pillBadgeCount}) and byte breakdown (${result.breakdownSectionCount}) agree`);
break;
}
}
// No suitable packet found: hard-fail only when the env flag demands it,
// otherwise raise a skip-marked error (handled by the test() harness).
if (!found) {
if (process.env.E2E_REQUIRE_PATH_TEST === '1') {
throw new Error('BLOCKED — no multi-hop packet found in first 15 rows (E2E_REQUIRE_PATH_TEST=1 requires it)');
}
const skipErr = new Error('SKIP: No multi-hop packet with byte breakdown found in first 15 rows — needs fixture');
skipErr.skip = true;
throw skipErr;
}
});
// Test: hex-strip color spans match the labeled byte rows (per-obs raw_hex).
// Regression #891: server-supplied breakdown was computed once from top-level
// raw_hex, so per-observation rendering had off-by-N highlights vs the labels.
await test('Packet detail hex strip Path range matches hop row count', async () => {
await page.goto(BASE + '#/packets', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('table tbody tr', { timeout: 15000 });
await page.waitForTimeout(500);
const rows = await page.$$('table tbody tr[data-action]');
let checked = 0;
// Scan up to 25 rows but stop after 3 rows actually yielded path data.
for (let i = 0; i < Math.min(rows.length, 25) && checked < 3; i++) {
await rows[i].click({ timeout: 3000 }).catch(() => null);
await page.waitForTimeout(400);
const result = await page.evaluate(() => {
const dump = document.querySelector('.hex-dump');
const fieldTable = document.querySelector('table.field-table');
if (!dump || !fieldTable) return null;
const pathSpan = dump.querySelector('span.hex-byte.hex-path');
const pathBytes = pathSpan ? pathSpan.textContent.trim().split(/\s+/).filter(Boolean).length : 0;
const hopRows = [];
for (const tr of fieldTable.querySelectorAll('tr')) {
const cells = [...tr.cells].map(c => c.textContent.trim());
// NOTE(review): guard is cells.length >= 2 but cells[2] is pushed — a
// 2-cell row would push undefined; consider >= 3 if that can occur.
if (cells.length >= 2 && /^Hop\s+\d+/.test(cells[1])) hopRows.push(cells[2]);
}
return { pathBytes, hopRows };
});
if (!result || (result.pathBytes === 0 && result.hopRows.length === 0)) continue;
checked++;
// Either both zero, or the count of bytes inside hex-path == hop rows.
// (For multi-byte hash sizes this is bytes-per-hop * hops; for hash_size=1 it's just hops.)
// The simpler invariant: if there are hop rows, hex-path span must exist and have at least
// as many bytes as there are hops (== exactly hops * hash_size).
assert(result.hopRows.length > 0,
`row ${i}: hex-path span has ${result.pathBytes} bytes but no hop rows in the labeled table`);
assert(result.pathBytes >= result.hopRows.length,
`row ${i}: hex-path has ${result.pathBytes} bytes but ${result.hopRows.length} hop rows — strip and labels disagree`);
assert(result.pathBytes % result.hopRows.length === 0,
`row ${i}: hex-path has ${result.pathBytes} bytes but ${result.hopRows.length} hop rows — bytes/hops not divisible (hash_size violated)`);
console.log(` ✓ row ${i}: hex-path ${result.pathBytes} bytes / ${result.hopRows.length} hop rows (hash_size=${result.pathBytes / result.hopRows.length})`);
}
if (checked === 0) {
const skipErr = new Error('SKIP: no packet with rendered hex strip + hop rows found in first 25 rows');
skipErr.skip = true;
throw skipErr;
}
});
// Test: clicking a different observation row re-renders strip + breakdown consistently.
// Regression: observations of the same packet hash have different raw_hex (#882),
// so picking a different obs must recompute the byte ranges, not reuse the old ones.
await test('Packet detail switches consistently across observations', async () => {
await page.goto(BASE + '#/packets?groupByHash=1', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('table tbody tr', { timeout: 15000 });
await page.waitForTimeout(500);
// Find a group row whose detail pane lists at least 2 observations.
let opened = false;
const groupRows = await page.$$('table tbody tr[data-action]');
for (let i = 0; i < Math.min(groupRows.length, 10); i++) {
await groupRows[i].click({ timeout: 3000 }).catch(() => null);
await page.waitForTimeout(400);
const obsCount = await page.evaluate(() => {
return document.querySelectorAll('table.observations-table tbody tr, .obs-row').length;
});
if (obsCount >= 2) { opened = true; break; }
}
if (!opened) {
const skipErr = new Error('SKIP: no multi-observation packet found in first 10 group rows');
skipErr.skip = true;
throw skipErr;
}
// snapshot — read path-byte count, hop-row count, and the raw hex strip
// from the currently rendered detail pane (null if pane not rendered).
async function snapshot() {
return page.evaluate(() => {
const dump = document.querySelector('.hex-dump');
const fieldTable = document.querySelector('table.field-table');
if (!dump || !fieldTable) return null;
const pathSpan = dump.querySelector('span.hex-byte.hex-path');
const pathBytes = pathSpan ? pathSpan.textContent.trim().split(/\s+/).filter(Boolean).length : 0;
const hopRows = [];
for (const tr of fieldTable.querySelectorAll('tr')) {
const cells = [...tr.cells].map(c => c.textContent.trim());
if (cells.length >= 2 && /^Hop\s+\d+/.test(cells[1])) hopRows.push(cells[2]);
}
const rawHexParts = [...dump.querySelectorAll('span.hex-byte')].map(s => s.textContent.trim());
return { pathBytes, hopCount: hopRows.length, rawHexJoined: rawHexParts.join('|') };
});
}
const snapA = await snapshot();
assert(snapA, 'first snapshot must have hex dump + field table');
assert(snapA.hopCount === 0 || snapA.pathBytes >= snapA.hopCount,
`obs A inconsistent: hex-path ${snapA.pathBytes} bytes vs ${snapA.hopCount} hop rows`);
// Switch to the second observation from inside the page context.
const switched = await page.evaluate(() => {
const obsRows = [...document.querySelectorAll('table.observations-table tbody tr, .obs-row')];
if (obsRows.length < 2) return false;
obsRows[1].click();
return true;
});
assert(switched, 'should click second observation row');
await page.waitForTimeout(500);
const snapB = await snapshot();
assert(snapB, 'second snapshot must have hex dump + field table');
assert(snapB.hopCount === 0 || snapB.pathBytes >= snapB.hopCount,
`obs B inconsistent: hex-path ${snapB.pathBytes} bytes vs ${snapB.hopCount} hop rows`);
console.log(` ✓ obs A: ${snapA.pathBytes} path bytes / ${snapA.hopCount} hops; obs B: ${snapB.pathBytes} / ${snapB.hopCount}`);
});
// Test: clicking the 🔍 Details button in the nodes side panel navigates to
// the full-screen node detail view. Regression: hash already === target,
// so location.hash assignment was a no-op and the panel stayed open.
await test('Nodes side panel Details button opens full-screen view', async () => {
await page.goto(BASE + '#/nodes', { waitUntil: 'domcontentloaded' });
await page.waitForSelector('table tbody tr[data-action]', { timeout: 15000 });
await page.waitForTimeout(500);
// Open side panel
await page.click('table tbody tr[data-action]');
await page.waitForSelector('#nodesRight .node-detail-btn', { timeout: 5000 });
// Click Details
await page.click('#nodesRight .node-detail-btn');
// Wait for full-screen view to appear
await page.waitForSelector('.node-fullscreen', { timeout: 5000 });
// waitForSelector above already throws on absence; this re-check is belt-and-braces.
const isFullScreen = await page.evaluate(() => !!document.querySelector('.node-fullscreen'));
assert(isFullScreen, 'Details button should open full-screen node view');
});
await browser.close();
// Summary
const skipped = results.filter(r => r.skipped).length;
const passed = results.filter(r => r.pass && !r.skipped).length;
const passed = results.filter(r => r.pass).length;
const failed = results.filter(r => !r.pass).length;
console.log(`\n${passed}/${results.length} tests passed${skipped ? `, ${skipped} skipped` : ''}${failed ? `, ${failed} failed` : ''}`);
console.log(`\n${passed}/${results.length} tests passed${failed ? `, ${failed} failed` : ''}`);
process.exit(failed > 0 ? 1 : 0);
}
-755
View File
@@ -222,10 +222,6 @@ console.log('\n=== app.js: routeTypeName / payloadTypeName ===');
test('payloadTypeName(4) = Advert', () => assert.strictEqual(ctx.payloadTypeName(4), 'Advert'));
test('payloadTypeName(2) = Direct Msg', () => assert.strictEqual(ctx.payloadTypeName(2), 'Direct Msg'));
test('payloadTypeName(99) = UNKNOWN', () => assert.strictEqual(ctx.payloadTypeName(99), 'UNKNOWN'));
test('getPathLenOffset: transport route (0) → 5', () => assert.strictEqual(ctx.getPathLenOffset(0), 5));
test('getPathLenOffset: transport route (3) → 5', () => assert.strictEqual(ctx.getPathLenOffset(3), 5));
test('getPathLenOffset: flood route (1) → 1', () => assert.strictEqual(ctx.getPathLenOffset(1), 1));
test('getPathLenOffset: direct route (2) → 1', () => assert.strictEqual(ctx.getPathLenOffset(2), 1));
}
console.log('\n=== app.js: truncate ===');
@@ -690,88 +686,6 @@ console.log('\n=== haversineKm (hop-resolver.js) ===');
});
}
// ===== pickByAffinity — neighbor-graph + centroid scoring (#874) =====
console.log('\n=== pickByAffinity neighbor-graph scoring (#874) ===');
// Block scope keeps the sandbox ctx and fixture nodes local to this suite.
// makeSandbox/loadInCtx are defined elsewhere in this test file.
{
const ctx = makeSandbox();
ctx.IATA_COORDS_GEO = {};
loadInCtx(ctx, 'public/hop-resolver.js');
const HR = ctx.window.HopResolver;
// Two nodes sharing prefix "ab", hundreds of km apart.
// NodeSF is near San Francisco, NodeDEN is near Denver.
const nodeSF = { public_key: 'ab11111111111111', name: 'NodeSF', lat: 37.7, lon: -122.4 };
const nodeDEN = { public_key: 'ab22222222222222', name: 'NodeDEN', lat: 39.7, lon: -104.9 };
// A known neighbor of NodeSF (in the graph)
const nodeNeighbor = { public_key: 'cc33333333333333', name: 'SFNeighbor', lat: 37.8, lon: -122.3 };
// Another known node near Denver
const nodeDenNeighbor = { public_key: 'dd44444444444444', name: 'DENNeighbor', lat: 39.8, lon: -105.0 };
test('#874: graph edge scoring picks correct regional candidate (SF)', () => {
HR.init([nodeSF, nodeDEN, nodeNeighbor, nodeDenNeighbor]);
HR.setAffinity({ edges: [
{ source: 'cc33333333333333', target: 'ab11111111111111', weight: 5 },
{ source: 'dd44444444444444', target: 'ab22222222222222', weight: 5 },
]});
// Path: SFNeighbor → [ab??] → DENNeighbor
// With graph edges, ab11 (NodeSF) has edge to SFNeighbor, ab22 (NodeDEN) has edge to DENNeighbor
// Prev=SFNeighbor, Next=DENNeighbor → both have score 5, but SFNeighbor edge only to ab11
const result = HR.resolve(['cc', 'ab', 'dd'],
null, null, null, null);
assert.strictEqual(result['ab'].name, 'NodeSF',
'Should pick NodeSF because it has a graph edge to prev hop SFNeighbor');
});
test('#874: graph edge scoring — next hop breaks tie', () => {
HR.init([nodeSF, nodeDEN, nodeNeighbor, nodeDenNeighbor]);
HR.setAffinity({ edges: [
{ source: 'dd44444444444444', target: 'ab22222222222222', weight: 8 },
// No edge from SFNeighbor to either ab node
]});
// Path: SFNeighbor → [ab??] → DENNeighbor
// Only ab22 (NodeDEN) has edge to DENNeighbor (next hop)
const result = HR.resolve(['cc', 'ab', 'dd'],
null, null, null, null);
assert.strictEqual(result['ab'].name, 'NodeDEN',
'Should pick NodeDEN because it has graph edge to next hop DENNeighbor');
});
test('#874: centroid fallback when no graph edges exist', () => {
HR.init([nodeSF, nodeDEN, nodeNeighbor]);
HR.setAffinity({ edges: [] }); // no edges at all
// Path: SFNeighbor → [ab??]
// SFNeighbor is at (37.8, -122.3), centroid is just that point
// NodeSF (37.7, -122.4) is ~14km away, NodeDEN (39.7, -104.9) is ~1500km away
const result = HR.resolve(['cc', 'ab'],
null, null, null, null);
assert.strictEqual(result['ab'].name, 'NodeSF',
'Should pick NodeSF via centroid proximity to SFNeighbor');
});
test('#874: centroid uses average of prev+next positions', () => {
// Prev near SF, next near Denver → centroid is midpoint (~Nevada)
// NodeDEN is closer to Nevada midpoint than NodeSF
const nodeMid = { public_key: 'ee55555555555555', name: 'MidNode', lat: 38.5, lon: -114.0 };
HR.init([nodeSF, nodeDEN, nodeNeighbor, nodeDenNeighbor, nodeMid]);
HR.setAffinity({ edges: [] });
// Path: SFNeighbor → [ab??] → DENNeighbor
// centroid = avg(37.8,-122.3, 39.8,-105.0) = (38.8, -113.65) — closer to Denver
const result = HR.resolve(['cc', 'ab', 'dd'],
null, null, null, null);
assert.strictEqual(result['ab'].name, 'NodeDEN',
'Should pick NodeDEN because centroid of SF+Denver neighbors is closer to Denver');
});
test('#874: fallback when no context at all', () => {
HR.init([nodeSF, nodeDEN]);
HR.setAffinity({ edges: [] });
// Single ambiguous hop, no origin/observer, no neighbors
// Loose assertion: either branch (ambiguous flag or a named pick) is acceptable.
const result = HR.resolve(['ab'], null, null, null, null);
assert.ok(result['ab'].ambiguous || result['ab'].name != null,
'Should resolve to some candidate without crashing');
});
}
// ===== SNR/RSSI Number casting =====
{
// These test the pattern used in observer-detail.js, home.js, traces.js, live.js
@@ -1804,128 +1718,6 @@ console.log('\n=== app.js: formatEngineBadge ===');
});
}
// ===== APP.JS: computeBreakdownRanges =====
console.log('\n=== app.js: computeBreakdownRanges ===');
{
// Exercises computeBreakdownRanges(hex, route_type, payload_type) from app.js,
// loaded into an isolated vm sandbox. The function maps a raw packet hex dump
// to labeled byte ranges {start, end, label} (inclusive offsets) for the hex
// viewer. Fixtures below hand-encode the wire format: 1 header byte, then
// (for transport routes) 4 transport-code bytes, then the path-length byte
// (top 2 bits: hash_size-1; low 6 bits: hash_count), then path, then payload.
const ctx = makeSandbox();
loadInCtx(ctx, 'public/roles.js');
loadInCtx(ctx, 'public/app.js');
const computeBreakdownRanges = ctx.computeBreakdownRanges;
// Helper: find the result entry carrying a given label (undefined if absent).
function findRange(ranges, label) {
return ranges.find(r => r.label === label);
}
test('returns [] for empty hex', () => {
assert.deepEqual(computeBreakdownRanges('', 1, 5), []);
});
test('returns [] for too-short hex (< 2 bytes)', () => {
assert.deepEqual(computeBreakdownRanges('15', 1, 5), []);
});
test('FLOOD non-transport: 4-hop hash_size=1', () => {
// header=15, plb=04 → hash_size=1, hash_count=4
// bytes: 15 04 90 FA F9 10 6E 01 D9
const r = computeBreakdownRanges('150490FAF910 6E01D9'.replace(/\s/g,''), 1, 5);
assert.deepEqual(findRange(r, 'Header'), { start: 0, end: 0, label: 'Header' });
assert.deepEqual(findRange(r, 'Path Length'), { start: 1, end: 1, label: 'Path Length' });
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 5, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 6, end: 8, label: 'Payload' });
assert.strictEqual(findRange(r, 'Transport Codes'), undefined);
});
test('FLOOD non-transport: 7-hop hash_size=1', () => {
// header=15, plb=07
const hex = '15077f6d7d1cadeca33988fd95e0851ebf01ea12e1879e';
const r = computeBreakdownRanges(hex, 1, 5);
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 8, label: 'Path' });
const payload = findRange(r, 'Payload');
assert.strictEqual(payload.start, 9, 'payload starts after the 7 path bytes');
});
test('FLOOD non-transport: 8-hop hash_size=1', () => {
const hex = '1508' + '11223344556677AA' + 'BBCCDD';
const r = computeBreakdownRanges(hex, 1, 5);
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 9, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 10, end: 12, label: 'Payload' });
});
test('Direct advert: 0-hop, no Path range', () => {
// plb=00 → 0 hops; expect Path Length but NO Path range
const r = computeBreakdownRanges('1100AABBCCDD', 1, 4);
assert.deepEqual(findRange(r, 'Path Length'), { start: 1, end: 1, label: 'Path Length' });
assert.strictEqual(findRange(r, 'Path'), undefined);
});
test('Transport route shifts path-length offset by 4', () => {
// route_type=0 (TRANSPORT_FLOOD): bytes 1..4 are Transport Codes
// header=14, transport=AABBCCDD, plb=02, hops=11 22, payload=99
const hex = '14AABBCCDD021122' + '99';
const r = computeBreakdownRanges(hex, 0, 5);
assert.deepEqual(findRange(r, 'Transport Codes'), { start: 1, end: 4, label: 'Transport Codes' });
assert.deepEqual(findRange(r, 'Path Length'), { start: 5, end: 5, label: 'Path Length' });
assert.deepEqual(findRange(r, 'Path'), { start: 6, end: 7, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 8, end: 8, label: 'Payload' });
});
// The next three cases walk the hash_size encoding in the top two plb bits.
test('hash_size=2 (plb top bits=01): 4 hops × 2 bytes', () => {
// plb = 01 0001 00 = 0x44 → hash_size=2, hash_count=4 → 8 path bytes
const hex = '15' + '44' + 'AABB' + 'CCDD' + 'EEFF' + '1122' + '9988';
const r = computeBreakdownRanges(hex, 1, 5);
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 9, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 10, end: 11, label: 'Payload' });
});
test('hash_size=3 (plb top bits=10): 2 hops × 3 bytes', () => {
// plb = 10 0000 10 = 0x82 → hash_size=3, hash_count=2 → 6 path bytes
const hex = '15' + '82' + 'AABBCC' + 'DDEEFF' + '99';
const r = computeBreakdownRanges(hex, 1, 5);
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 7, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 8, end: 8, label: 'Payload' });
});
test('hash_size=4 (plb top bits=11): 2 hops × 4 bytes', () => {
// plb = 11 0000 10 = 0xC2 → hash_size=4, hash_count=2 → 8 path bytes
const hex = '15' + 'C2' + 'AABBCCDD' + 'EEFF1122' + '99887766';
const r = computeBreakdownRanges(hex, 1, 5);
assert.deepEqual(findRange(r, 'Path'), { start: 2, end: 9, label: 'Path' });
assert.deepEqual(findRange(r, 'Payload'), { start: 10, end: 13, label: 'Payload' });
});
test('truncated path: not enough bytes → no Path range', () => {
// plb=04 says 4 hops but only 2 bytes remain
const hex = '1504AABB';
const r = computeBreakdownRanges(hex, 1, 5);
assert.strictEqual(findRange(r, 'Path'), undefined);
});
test('ADVERT (payload_type=4) with full record: PubKey/Timestamp/Signature/Flags', () => {
// header=11, plb=00 (direct advert)
// payload: 32 bytes pubkey + 4 bytes ts + 64 bytes sig + 1 byte flags
const pubkey = 'AB'.repeat(32);
const ts = '11223344';
const sig = 'CD'.repeat(64);
const flags = '00';
const hex = '1100' + pubkey + ts + sig + flags;
const r = computeBreakdownRanges(hex, 1, 4);
assert.deepEqual(findRange(r, 'PubKey'), { start: 2, end: 33, label: 'PubKey' });
assert.deepEqual(findRange(r, 'Timestamp'), { start: 34, end: 37, label: 'Timestamp' });
assert.deepEqual(findRange(r, 'Signature'), { start: 38, end: 101, label: 'Signature' });
assert.deepEqual(findRange(r, 'Flags'), { start: 102, end: 102, label: 'Flags' });
});
test('NaN-safe: malformed path-length byte produces no Path range', () => {
// hex with non-hex char in plb position would parseInt-fail → bail
// Use a 1-byte payload that makes pathByte parseInt produce NaN-ish via X
// (parseInt of 'XY' is NaN). Since fs reads only hex chars, simulate via short hex.
// Easier: empty string already returns []; 1-byte returns []. Both covered above.
// Use plb=FF (hash_size=4, hash_count=63) too long for input → no Path
const r = computeBreakdownRanges('15FF' + 'AA', 1, 5);
assert.strictEqual(findRange(r, 'Path'), undefined);
});
}
// ===== APP.JS: isTransportRoute + transportBadge =====
console.log('\n=== app.js: isTransportRoute + transportBadge ===');
{
@@ -3015,126 +2807,6 @@ console.log('\n=== channels.js: encrypted channel without key shows lock message
const messageApiFetched = apiCallPaths.some(p => p.indexOf('/messages') !== -1);
assert.ok(!messageApiFetched, 'should NOT fetch messages API for encrypted channel without key');
});
// #825 regression: deep link to a `#`-named channel not in the loaded list.
// The 3 acceptance cases (unencrypted / encrypted-no-key / encrypted-with-key)
// must each behave correctly without the unconditional lock affordance.
// Drive the channels page through a deep link to a `#`-named channel inside a
// vm sandbox, stubbing the DOM, router, API, and crypto surface that
// channels.js/channel-decrypt.js touch.
//
// @param {object} opts
//   - target: channel name to select, e.g. '#test'
//   - includeEncryptedChannels: channel list returned when the API is queried
//     with includeEncrypted=true (the default list is always empty)
//   - storedKey: { name, hex } to pre-seed ChannelDecrypt storage, or null
// @returns {Promise<{msgHtml: string, apiCallPaths: string[]}>} the rendered
//   messages-pane HTML and the API paths requested AFTER channel selection
//   (paths from the initial page load are discarded).
async function runHashDeepLinkScenario(opts) {
// opts: { includeEncryptedChannels: [...], storedKey: { name, hex } | null, target: '#name' }
const ctx = makeSandbox();
const dom = {};
// Lazily create one stub element per id; repeated lookups return the same
// object so tests can inspect innerHTML written by the page code.
function makeEl(id) {
if (dom[id]) return dom[id];
dom[id] = {
id, innerHTML: '', textContent: '', value: '',
scrollTop: 0, scrollHeight: 100, clientHeight: 80,
style: {}, dataset: {},
classList: { add() {}, remove() {}, toggle() {}, contains() { return false; } },
addEventListener() {}, removeEventListener() {},
querySelector() { return null; }, querySelectorAll() { return []; },
getBoundingClientRect() { return { left: 0, bottom: 0, width: 0 }; },
setAttribute() {}, removeAttribute() {}, focus() {},
};
return dom[id];
}
const headerText = { textContent: '' };
makeEl('chHeader').querySelector = (sel) => (sel === '.ch-header-text' ? headerText : null);
['chMessages', 'chList', 'chScrollBtn', 'chAriaLive', 'chBackBtn', 'chRegionFilter'].forEach(makeEl);
const appEl = {
innerHTML: '',
querySelector(sel) {
if (sel === '.ch-sidebar' || sel === '.ch-sidebar-resize' || sel === '.ch-main') return makeEl(sel);
if (sel === '.ch-layout') return { classList: { add() {}, remove() {}, contains() { return false; } } };
return makeEl(sel);
},
addEventListener() {},
};
let apiCallPaths = [];
ctx.document.getElementById = makeEl;
ctx.document.querySelector = (sel) => {
if (sel === '.ch-layout') return { classList: { add() {}, remove() {}, contains() { return false; } } };
return null;
};
ctx.document.querySelectorAll = () => [];
ctx.document.addEventListener = () => {};
ctx.document.removeEventListener = () => {};
ctx.document.documentElement = { getAttribute: () => null, setAttribute: () => {} };
ctx.document.body = { appendChild() {}, removeChild() {}, contains() { return false; } };
ctx.history = { replaceState() {} };
ctx.matchMedia = () => ({ matches: false });
ctx.window.matchMedia = ctx.matchMedia;
ctx.MutationObserver = function () { this.observe = () => {}; this.disconnect = () => {}; };
ctx.RegionFilter = { init() {}, onChange() { return () => {}; }, offChange() {}, getRegionParam() { return ''; } };
ctx.debouncedOnWS = (fn) => fn;
ctx.onWS = () => {};
ctx.offWS = () => {};
// API stub records every requested path so tests can assert which endpoints
// were (not) hit for a given channel state.
ctx.api = (path) => {
apiCallPaths.push(path);
if (path.indexOf('/observers') === 0) return Promise.resolve({ observers: [] });
if (path.indexOf('/channels') === 0 && path.indexOf('/messages') === -1) {
// Toggle-off list never includes encrypted channels for the initial load
if (path.indexOf('includeEncrypted=true') !== -1) {
return Promise.resolve({ channels: opts.includeEncryptedChannels || [] });
}
return Promise.resolve({ channels: [] });
}
if (path.indexOf('/messages') !== -1) {
return Promise.resolve({ messages: [{ sender: 'X', text: 'hello', timestamp: '2025-01-01T00:00:00Z' }] });
}
return Promise.resolve({});
};
ctx.CLIENT_TTL = { observers: 120000, channels: 15000, channelMessages: 10000, nodeDetail: 10000 };
ctx.ROLE_EMOJI = {}; ctx.ROLE_LABELS = {};
ctx.timeAgo = () => '1m ago';
ctx.registerPage = (name, handlers) => { ctx._pageHandlers = handlers; };
// FIX: browser btoa/atob operate on Latin-1 (one byte per code unit), not
// UTF-8. Encoding with 'utf8' silently diverged for any non-ASCII input;
// 'latin1' matches the browser for every input real btoa would accept.
ctx.btoa = (s) => Buffer.from(String(s), 'latin1').toString('base64');
ctx.atob = (s) => Buffer.from(String(s), 'base64').toString('latin1');
ctx.crypto = { subtle: require('crypto').webcrypto.subtle };
ctx.TextEncoder = TextEncoder; ctx.TextDecoder = TextDecoder; ctx.Uint8Array = Uint8Array;
loadInCtx(ctx, 'public/channel-decrypt.js');
loadInCtx(ctx, 'public/channels.js');
if (opts.storedKey) {
ctx.ChannelDecrypt.saveKey(opts.storedKey.name, opts.storedKey.hex);
}
ctx._pageHandlers.init(appEl);
// Flush several microtask turns so the async init chain settles.
for (let i = 0; i < 10; i++) await Promise.resolve();
apiCallPaths = [];
await ctx.window._channelsSelectChannelForTest(opts.target);
return { msgHtml: dom['chMessages'].innerHTML, apiCallPaths };
}
// Shared predicate: did any recorded API call hit a /messages endpoint?
const fetchedMessages = (paths) => paths.some((p) => p.includes('/messages'));
test('#825: deep link to unencrypted #channel falls through to REST and renders messages', async () => {
  const res = await runHashDeepLinkScenario({
    target: '#test',
    storedKey: null,
    includeEncryptedChannels: [{ hash: '#test', name: '#test', messageCount: 3, lastActivity: null, encrypted: null }],
  });
  assert.ok(!res.msgHtml.includes('🔒'), 'unencrypted #channel must NOT show lock affordance');
  assert.ok(fetchedMessages(res.apiCallPaths), 'unencrypted #channel must fetch messages REST endpoint');
});
test('#811 preserved: deep link to encrypted #channel without key shows lock', async () => {
  const res = await runHashDeepLinkScenario({
    target: '#private',
    storedKey: null,
    includeEncryptedChannels: [{ hash: '#private', name: '#private', messageCount: 5, lastActivity: null, encrypted: true }],
  });
  assert.ok(res.msgHtml.includes('🔒'), 'encrypted #channel without key must show lock affordance');
  assert.ok(res.msgHtml.includes('no decryption key'), 'lock should mention no decryption key');
  assert.ok(!fetchedMessages(res.apiCallPaths), 'must NOT fetch /messages REST for encrypted channel without key');
});
test('#815 preserved: deep link to #channel with stored key triggers decrypt path (no lock)', async () => {
  const res = await runHashDeepLinkScenario({
    target: '#private',
    storedKey: { name: '#private', hex: 'abcd1234abcd1234abcd1234abcd1234' },
    includeEncryptedChannels: [{ hash: '#private', name: '#private', messageCount: 5, lastActivity: null, encrypted: true }],
  });
  assert.ok(!res.msgHtml.includes('no decryption key'), 'must not show no-key lock when key is stored');
  // Decrypt path either renders something or shows decrypt-specific empty/wrong-key state — never the no-key lock.
});
}
// ===== PACKETS.JS: savedTimeWindowMin default guard =====
console.log('\n=== packets.js: savedTimeWindowMin defaults ===');
@@ -5568,11 +5240,6 @@ console.log('\n=== packets.js: buildFieldTable transport offsets (#765) ===');
ftCtx.window.truncate = ftCtx.truncate;
ftCtx.escapeHtml = (s) => String(s || '').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');
ftCtx.window.escapeHtml = ftCtx.escapeHtml;
ftCtx.window.HopDisplay = { renderHop: (hex) => hex };
ftCtx.isTransportRoute = (rt) => rt === 0 || rt === 3;
ftCtx.window.isTransportRoute = ftCtx.isTransportRoute;
ftCtx.getPathLenOffset = (rt) => ftCtx.isTransportRoute(rt) ? 5 : 1;
ftCtx.window.getPathLenOffset = ftCtx.getPathLenOffset;
loadInCtx(ftCtx, 'public/packets.js');
const { buildFieldTable, fieldRow } = ftCtx.window._packetsTestAPI;
@@ -5638,73 +5305,6 @@ console.log('\n=== packets.js: buildFieldTable transport offsets (#765) ===');
});
}
// ===== packets.js: buildFieldTable hop count from path_len (#844) =====
console.log('\n=== packets.js: buildFieldTable hop count from path_len (#844) ===');
{
// buildFieldTable (packets.js) in a vm sandbox with hand-stubbed globals.
// Verifies #844/#885: the byte-breakdown table derives hop count and field
// offsets from the per-observation path (pathHops), not from a separate
// parse of raw_hex, so the table and the path pill can never disagree.
const ftCtx = makeSandbox();
ftCtx.registerPage = () => {};
ftCtx.onWS = () => {};
ftCtx.offWS = () => {};
ftCtx.api = () => Promise.resolve({});
ftCtx.window.getParsedPath = () => [];
ftCtx.window.getParsedDecoded = () => ({});
const ROUTE_TYPES = {0:'TRANSPORT_FLOOD',1:'FLOOD',2:'DIRECT',3:'TRANSPORT_DIRECT'};
const PAYLOAD_TYPES = {0:'ADVERT',1:'TXT_MSG',2:'GRP_TXT',3:'REQ',4:'ACK'};
ftCtx.routeTypeName = (n) => ROUTE_TYPES[n] || 'UNKNOWN';
ftCtx.payloadTypeName = (n) => PAYLOAD_TYPES[n] || 'UNKNOWN';
ftCtx.window.routeTypeName = ftCtx.routeTypeName;
ftCtx.window.payloadTypeName = ftCtx.payloadTypeName;
ftCtx.truncate = (str, len) => str && str.length > len ? str.slice(0, len) + '…' : (str || '');
ftCtx.window.truncate = ftCtx.truncate;
ftCtx.escapeHtml = (s) => String(s || '').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');
ftCtx.window.escapeHtml = ftCtx.escapeHtml;
ftCtx.window.HopDisplay = { renderHop: (hex) => hex };
// Transport routes (0 and 3) carry 4 transport-code bytes before path_len,
// so their path-length byte sits at offset 5 instead of 1.
ftCtx.isTransportRoute = (rt) => rt === 0 || rt === 3;
ftCtx.window.isTransportRoute = ftCtx.isTransportRoute;
ftCtx.getPathLenOffset = (rt) => ftCtx.isTransportRoute(rt) ? 5 : 1;
ftCtx.window.getPathLenOffset = ftCtx.getPathLenOffset;
loadInCtx(ftCtx, 'public/packets.js');
const { buildFieldTable } = ftCtx.window._packetsTestAPI;
test('#885: byte breakdown uses pathHops length (single source of truth)', () => {
// After #885 the byte breakdown agrees with the path pill: both render
// from the per-observation path_json. raw_hex is the underlying bytes
// for that same observation, so consistency is by construction.
// path_len = 0x42 → hash_size=2, hash_count=2
// raw_hex: header(11) + path_len(42) + hop0(41B1) + hop1(27D7) + pubkey(32 bytes)...
const pubkey = 'C0DEDAD4'.padEnd(64, '0'); // 32 bytes = 64 hex chars
const raw = '1142' + '41B1' + '27D7' + pubkey + '00000000' + '0'.repeat(128);
const pkt = { raw_hex: raw, route_type: 1, payload_type: 0 };
// Per-obs path_json IS the source of truth — pass the 2 hops that match raw_hex.
const pathHops = ['41B1', '27D7'];
const html = buildFieldTable(pkt, {}, pathHops, {});
assert.ok(html.includes('Path (2 hops)'), 'Should show "Path (2 hops)"');
assert.ok(html.includes('41B1'), 'Should show hop 0 = 41B1');
assert.ok(html.includes('27D7'), 'Should show hop 1 = 27D7');
});
test('#885: pubkey offset advances by hashSize * pathHops.length', () => {
const pubkey = 'C0DEDAD4'.padEnd(64, '0');
const raw = '1142' + '41B1' + '27D7' + pubkey + '00000000' + '0'.repeat(128);
const pkt = { raw_hex: raw, route_type: 1, payload_type: 0 };
const html = buildFieldTable(pkt, { type: 'ADVERT', pubKey: pubkey }, ['41B1', '27D7'], {});
// Public Key should be at offset 6 (1 header + 1 path_len + 2*2 hops = 6)
assert.ok(html.includes('>6<') || html.includes('"6"'),
'Public Key should be at offset 6');
});
test('#844: hashCountVal=0 (direct advert) skips Path section', () => {
// path_len = 0x00 → hash_size=1, hash_count=0
const raw = '1100' + '0'.repeat(200);
const pkt = { raw_hex: raw, route_type: 1, payload_type: 0 };
const html = buildFieldTable(pkt, {}, [], {});
assert.ok(!html.includes('section-path'), 'Should not render Path section for direct advert');
assert.ok(html.includes('direct advert'), 'Should note direct advert in path_length description');
});
}
// ===== live.js: anomaly icon in feed =====
console.log('\n=== live.js: anomaly icon in feed ===');
{
@@ -5904,15 +5504,6 @@ console.log('\n=== channel-decrypt.js: key derivation, MAC, parsing, storage ===
assert.strictEqual(ctx.window.renderSkewBadge(null, 0), '');
});
test('renderSkewBadge renders bimodal_clock badge with tooltip (#845)', () => {
var cs = { goodFraction: 0.6, recentBadSampleCount: 4, recentSampleCount: 10 };
var html = ctx.window.renderSkewBadge('bimodal_clock', -5, cs);
assert.ok(html.includes('skew-badge--bimodal_clock'), 'should contain bimodal_clock class');
assert.ok(html.includes('bimodal'), 'tooltip should mention bimodal');
assert.ok(html.includes('40%'), 'tooltip should show bad percentage');
assert.ok(html.includes('⏰'), 'should contain clock emoji');
});
test('renderSkewSparkline returns SVG with data points', () => {
var samples = [
{ ts: 1000, skew: 10 },
@@ -6105,352 +5696,6 @@ console.log('\n=== analytics.js: renderCollisionsFromServer collision table ==='
});
}
// ===== Issue #849: Per-observation packet detail tests =====
{
console.log('\n=== Issue #849: Per-observation packet detail ===');
// Test helper: extract hop count from raw_hex path_len byte
// Derive the hop count from a packet's raw hex dump: locate the path_len
// byte (offset 1; offset 5 for transport routes 0/3, whose 4 transport-code
// bytes come first) and mask off its low 6 bits. Returns null when the hex
// is missing, shorter than 2 bytes, or the byte is unparseable.
function extractRawHopCount(rawHex, routeType) {
  if (!rawHex || rawHex.length < 4) return null;
  const byteOffset = (routeType === 0 || routeType === 3) ? 5 : 1;
  const pathLenByte = parseInt(rawHex.slice(byteOffset * 2, byteOffset * 2 + 2), 16);
  return isNaN(pathLenByte) ? null : pathLenByte & 0x3F;
}
// #849 — per-observation packet detail: hop counts come from each
// observation's own raw_hex/path_json, and selecting an observation row
// overlays its fields on the aggregated packet via object spread.
test('#849: hop count from raw_hex path_len byte (2 hops)', () => {
// path_len byte = 0x82: hash_size=2+1=3, hash_count=2
const rawHex = '0482aabbccddee'; // header + path_len(0x82) + path data
assert.strictEqual(extractRawHopCount(rawHex, 1), 2);
});
test('#849: hop count from raw_hex path_len byte (0 hops = direct)', () => {
const rawHex = '0400'; // header + path_len=0x00
assert.strictEqual(extractRawHopCount(rawHex, 1), 0);
});
test('#849: hop count from raw_hex for transport route (offset 5)', () => {
// Transport routes have 4 bytes of transport codes before path_len
const rawHex = '00112233440541B127D7'; // header + 4 transport bytes + path_len(0x05)=5 hops
assert.strictEqual(extractRawHopCount(rawHex, 0), 5);
});
test('#849: hop count warns on inconsistency (path_json vs raw_hex)', () => {
// path_json has 3 hops, but raw_hex says 2
const pathJson = ['41B1', '27D7', '5EB0'];
const rawHopCount = 2;
assert.notStrictEqual(pathJson.length, rawHopCount, 'should detect inconsistency');
// In production code, rawHopCount is trusted
assert.strictEqual(rawHopCount, 2);
});
test('#849: per-observation fields override aggregated packet fields', () => {
const pkt = { id: 1, hash: 'abc', observer_id: 'obs-agg', snr: 10, rssi: -90, path_json: '["A","B","C"]', timestamp: '2026-01-01T00:00:00Z' };
const obs = { id: 2, observer_id: 'obs-1', snr: 5, rssi: -85, path_json: '["A"]', timestamp: '2026-01-01T00:01:00Z' };
// Simulate what renderDetail does: spread obs over pkt
const effective = {...pkt, ...obs, _isObservation: true};
delete effective._parsedPath; // clear cache
assert.strictEqual(effective.observer_id, 'obs-1');
assert.strictEqual(effective.snr, 5);
assert.strictEqual(effective.rssi, -85);
assert.strictEqual(effective.timestamp, '2026-01-01T00:01:00Z');
});
test('#849: first observation used when no specific observation selected', () => {
const observations = [
{ id: 10, observer_id: 'obs-A', path_json: '["X"]' },
{ id: 20, observer_id: 'obs-B', path_json: '["X","Y","Z"]' }
];
// No targetObsId → use observations[0]
const currentObs = observations[0];
assert.strictEqual(currentObs.id, 10);
assert.strictEqual(currentObs.observer_id, 'obs-A');
});
test('#849: clicking observation row selects that observation', () => {
const observations = [
{ id: 10, observer_id: 'obs-A', path_json: '["X"]' },
{ id: 20, observer_id: 'obs-B', path_json: '["X","Y","Z"]' }
];
// String-compare both sides: the DOM dataset yields a string id.
const targetObsId = '20';
const currentObs = observations.find(o => String(o.id) === String(targetObsId));
assert.ok(currentObs);
assert.strictEqual(currentObs.observer_id, 'obs-B');
});
test('#849: null/missing raw_hex returns null hop count', () => {
assert.strictEqual(extractRawHopCount(null, 1), null);
assert.strictEqual(extractRawHopCount('', 1), null);
assert.strictEqual(extractRawHopCount('04', 1), null); // too short
});
}
// ===== Issue #852: hashSize offset + var(--muted) regression =====
{
console.log('\n=== Issue #852: hashSize path_len offset + var(--muted) regression ===');
// Use getPathLenOffset from app.js (loaded via vm context) to avoid duplicating offset logic
const ctx852 = makeSandbox();
loadInCtx(ctx852, 'public/roles.js');
loadInCtx(ctx852, 'public/app.js');
function extractHashSize(rawHex, routeType) {
const plOff = ctx852.getPathLenOffset(routeType);
const rawPathByte = rawHex ? parseInt(rawHex.slice(plOff * 2, plOff * 2 + 2), 16) : NaN;
return (isNaN(rawPathByte) || (rawPathByte & 0x3F) === 0) ? null : ((rawPathByte >> 6) + 1);
}
test('#852: hashSize for flood route (route_type=1, offset 1)', () => {
// Byte at offset 1 = 0x82 → hash_size = (0x82 >> 6) + 1 = 3
const rawHex = '0482aabbccddee';
assert.strictEqual(extractHashSize(rawHex, 1), 3);
});
test('#852: hashSize for direct transport route (route_type=0, offset 5)', () => {
// Bytes 1-4 are next_hop+last_hop, byte at offset 5 = 0x45 → hash_size = (0x45 >> 6) + 1 = 2
const rawHex = '001122334445aabb';
assert.strictEqual(extractHashSize(rawHex, 0), 2);
});
test('#852: hashSize for transport route flood (route_type=3, offset 5)', () => {
const rawHex = '00aabbccdd85aabb';
assert.strictEqual(extractHashSize(rawHex, 3), 3); // 0x85 >> 6 = 2, +1 = 3
});
test('#852: hashSize returns null for missing raw_hex', () => {
assert.strictEqual(extractHashSize(null, 1), null);
assert.strictEqual(extractHashSize('', 0), null);
});
test('#852: no var(--muted) in public/ files (regression guard)', () => {
const fs = require('fs');
const path = require('path');
const pubDir = path.join(__dirname, 'public');
const files = fs.readdirSync(pubDir).filter(f => f.endsWith('.js') || f.endsWith('.css'));
files.forEach(f => {
const content = fs.readFileSync(path.join(pubDir, f), 'utf8');
// Match var(--muted) but not var(--text-muted) or var(--bg-muted) etc.
const matches = content.match(/var\(--muted\)/g);
if (matches) throw new Error(`${f} contains undefined CSS var var(--muted); use var(--text-muted)`);
});
});
}
// ─── #862: Pubkey prefix search ──────────────────────────────────────────────
{
// #862 — node search accepts a pubkey PREFIX (hex) in addition to a name
// substring. nodes.js is loaded into a sandbox with the page's globals
// stubbed; only _nodesMatchesSearch is exercised.
const ctx = makeSandbox();
ctx.ROLE_COLORS = { repeater: '#22c55e', room: '#6366f1', companion: '#3b82f6', sensor: '#f59e0b' };
ctx.ROLE_STYLE = {};
ctx.TYPE_COLORS = {};
ctx.getNodeStatus = () => 'active';
ctx.getHealthThresholds = () => ({ staleMs: 600000, degradedMs: 1800000, silentMs: 86400000 });
ctx.timeAgo = () => '1m ago';
ctx.truncate = (s) => s;
ctx.escapeHtml = (s) => String(s || '');
ctx.payloadTypeName = () => 'Advert';
ctx.payloadTypeColor = () => 'advert';
ctx.registerPage = () => {};
ctx.RegionFilter = { init: () => {}, onChange: () => () => {}, getRegionParam: () => '' };
ctx.debouncedOnWS = () => null;
ctx.onWS = () => {};
ctx.offWS = () => {};
ctx.debounce = (fn) => fn;
ctx.api = () => Promise.resolve({ nodes: [], counts: {} });
ctx.invalidateApiCache = () => {};
ctx.CLIENT_TTL = { nodeList: 90000, nodeDetail: 240000, nodeHealth: 240000 };
ctx.initTabBar = () => {};
ctx.getFavorites = () => [];
ctx.favStar = () => '';
ctx.bindFavStars = () => {};
ctx.makeColumnsResizable = () => {};
ctx.Set = Set;
ctx.HEALTH_THRESHOLDS = { infraSilentMs: 86400000, nodeSilentMs: 7200000 };
loadInCtx(ctx, 'public/nodes.js');
const matchesSearch = ctx.window._nodesMatchesSearch;
test('#862: _nodesMatchesSearch matches name substring', () => {
const node = { name: 'MyRepeater', public_key: '3faebb0011223344' };
assert.strictEqual(matchesSearch(node, 'repeat'), true);
assert.strictEqual(matchesSearch(node, 'REPEAT'), true);
});
test('#862: _nodesMatchesSearch matches pubkey prefix (hex)', () => {
const node = { name: 'MyRepeater', public_key: '3faebb0011223344' };
assert.strictEqual(matchesSearch(node, '3f'), true);
assert.strictEqual(matchesSearch(node, '3fae'), true);
assert.strictEqual(matchesSearch(node, '3FAEBB'), true);
});
test('#862: _nodesMatchesSearch does NOT match pubkey substring (only prefix)', () => {
const node = { name: 'MyRepeater', public_key: '3faebb0011223344' };
assert.strictEqual(matchesSearch(node, 'aebb'), false);
});
test('#862: _nodesMatchesSearch returns true for empty query', () => {
const node = { name: 'Test', public_key: 'abcdef1234567890' };
assert.strictEqual(matchesSearch(node, ''), true);
assert.strictEqual(matchesSearch(node, null), true);
});
test('#862: _nodesMatchesSearch mixed query (non-hex) only matches name', () => {
const node = { name: 'alpha', public_key: 'abcdef1234567890' };
assert.strictEqual(matchesSearch(node, 'xyz'), false);
assert.strictEqual(matchesSearch(node, 'alph'), true);
});
test('#862: _nodesMatchesSearch hex-named node — name "cafe" with pubkey "deadbeef..."', () => {
const node = { name: 'cafe', public_key: 'deadbeef11223344' };
// "cafe" matches by name (substring), NOT pubkey prefix
assert.strictEqual(matchesSearch(node, 'cafe'), true);
// "dead" matches by pubkey prefix
assert.strictEqual(matchesSearch(node, 'dead'), true);
// "cafe" should NOT match pubkey (not a prefix of "deadbeef")
assert.strictEqual(matchesSearch(node, 'beef'), false); // not a prefix, not in name
// "ca" matches name substring
assert.strictEqual(matchesSearch(node, 'ca'), true);
});
}
// ===== Issue #866: Full-page obs-switch — hex + path must update per observation =====
{
console.log('\n=== Issue #866: Full-page observation switch ===');
// #866 — on the full packet page, switching the selected observation must
// swap in that observation's path_json/raw_hex/snr/etc. The effective packet
// is built by spreading the observation over the aggregated packet and
// clearing the parsed-path cache (clearParsedCache from packet-helpers.js).
const ctx866 = makeSandbox();
loadInCtx(ctx866, 'public/roles.js');
loadInCtx(ctx866, 'public/app.js');
loadInCtx(ctx866, 'public/packet-helpers.js');
test('#866: switching observation updates effectivePkt path_json', () => {
const pkt = { id: 1, hash: 'abc123', observer_id: 'obs-agg', path_json: '["A","B","C","D"]', raw_hex: '0484A1B1C1D1', route_type: 1, timestamp: '2026-01-01T00:00:00Z' };
const obs1 = { id: 10, observer_id: 'obs-1', path_json: '["A","B"]', snr: 5, rssi: -80, timestamp: '2026-01-01T00:01:00Z' };
const obs2 = { id: 20, observer_id: 'obs-2', path_json: '["A","B","C","D"]', snr: 8, rssi: -75, timestamp: '2026-01-01T00:02:00Z' };
// Simulate renderDetail logic: pick obs1
const eff1 = ctx866.clearParsedCache({...pkt, ...obs1, _isObservation: true});
const path1 = ctx866.getParsedPath(eff1);
assert.deepStrictEqual(path1, ['A', 'B']);
assert.strictEqual(eff1.observer_id, 'obs-1');
assert.strictEqual(eff1.snr, 5);
// Switch to obs2
const eff2 = ctx866.clearParsedCache({...pkt, ...obs2, _isObservation: true});
const path2 = ctx866.getParsedPath(eff2);
assert.deepStrictEqual(path2, ['A', 'B', 'C', 'D']);
assert.strictEqual(eff2.observer_id, 'obs-2');
assert.strictEqual(eff2.snr, 8);
});
test('#866: effectivePkt preserves raw_hex from packet when obs has none', () => {
const pkt = { id: 1, hash: 'h1', raw_hex: '0482AABB', route_type: 1 };
const obs = { id: 10, observer_id: 'obs-1', path_json: '["AA"]', snr: 3, rssi: -90, timestamp: '2026-01-01T00:00:00Z' };
const eff = ctx866.clearParsedCache({...pkt, ...obs, _isObservation: true});
// obs doesn't have raw_hex, so packet's raw_hex survives spread
assert.strictEqual(eff.raw_hex, '0482AABB');
});
test('#866: effectivePkt uses obs raw_hex when available (API now returns it)', () => {
const pkt = { id: 1, hash: 'h1', raw_hex: '0482AABB', route_type: 1 };
const obs = { id: 10, observer_id: 'obs-1', raw_hex: '0441CC', path_json: '["CC"]', snr: 3, rssi: -90, timestamp: '2026-01-01T00:00:00Z' };
const eff = ctx866.clearParsedCache({...pkt, ...obs, _isObservation: true});
// obs has raw_hex from API, should override
assert.strictEqual(eff.raw_hex, '0441CC');
});
test('#866: direction field carried through observation spread', () => {
const pkt = { id: 1, hash: 'h1', direction: 'rx', route_type: 1 };
const obs = { id: 10, observer_id: 'obs-1', direction: 'tx', path_json: '[]', timestamp: '2026-01-01T00:00:00Z' };
const eff = {...pkt, ...obs, _isObservation: true};
assert.strictEqual(eff.direction, 'tx');
});
test('#866: resolved_path carried through observation spread', () => {
const pkt = { id: 1, hash: 'h1', resolved_path: '["aaa","bbb","ccc"]', route_type: 1 };
const obs = { id: 10, observer_id: 'obs-1', resolved_path: '["aaa"]', path_json: '["AA"]', timestamp: '2026-01-01T00:00:00Z' };
const eff = ctx866.clearParsedCache({...pkt, ...obs, _isObservation: true});
const rp = ctx866.getResolvedPath(eff);
assert.deepStrictEqual(rp, ['aaa']);
});
test('#866: getPathLenOffset used for hop count cross-check', () => {
// Flood route: offset 1
assert.strictEqual(ctx866.getPathLenOffset(1), 1);
assert.strictEqual(ctx866.getPathLenOffset(2), 1);
// Transport route: offset 5
assert.strictEqual(ctx866.getPathLenOffset(0), 5);
assert.strictEqual(ctx866.getPathLenOffset(3), 5);
});
test('#866: URL hash should encode obs parameter for deep linking', () => {
// Simulate the URL construction pattern from renderDetail obs click
const pktHash = 'abc123def456';
const obsId = '42';
const url = `#/packets/${pktHash}?obs=${obsId}`;
assert.strictEqual(url, '#/packets/abc123def456?obs=42');
// Parse back
const qIdx = url.indexOf('?');
const qs = new URLSearchParams(url.substring(qIdx));
assert.strictEqual(qs.get('obs'), '42');
});
}
// ===== #872 — hop-display unreliable badge =====
{
console.log('\n--- #872: hop-display unreliable warning badge ---');
// Build a minimal vm context with just enough window/document surface for
// hop-display.js to load, then run the script inside it. fs/vm/__dirname are
// assumed to come from the enclosing test file's top-level requires —
// NOTE(review): not visible in this chunk, confirm against the file header.
function makeHopDisplaySandbox() {
const sb = {
window: { addEventListener: () => {}, dispatchEvent: () => {} },
document: {
readyState: 'complete',
createElement: () => ({ id: '', textContent: '', innerHTML: '' }),
head: { appendChild: () => {} },
getElementById: () => null,
addEventListener: () => {},
querySelectorAll: () => [],
querySelector: () => null,
},
console,
// Shorthand properties seed the host globals into the sandbox so the
// loaded script can use them (NaN/undefined become same-named keys).
Date, Math, Array, Object, String, Number, JSON, RegExp, Map, Set,
encodeURIComponent, parseInt, parseFloat, isNaN, Infinity, NaN, undefined,
setTimeout: () => {}, setInterval: () => {}, clearTimeout: () => {}, clearInterval: () => {},
};
sb.window.document = sb.document;
sb.self = sb.window;
sb.globalThis = sb.window;
const ctx = vm.createContext(sb);
const hopSrc = fs.readFileSync(__dirname + '/public/hop-display.js', 'utf8');
vm.runInContext(hopSrc, ctx);
return ctx;
}
const hopCtx = makeHopDisplaySandbox();
test('#872: unreliable hop renders warning badge, not strikethrough', () => {
const html = hopCtx.window.HopDisplay.renderHop('AABB', {
name: 'TestNode', pubkey: 'pk123', unreliable: true,
ambiguous: false, conflicts: [], globalFallback: false,
}, {});
// Must contain unreliable warning badge button
assert.ok(html.includes('hop-unreliable-btn'), 'should have unreliable badge button');
assert.ok(html.includes('⚠️'), 'should have ⚠️ icon');
assert.ok(html.includes('Unreliable name resolution'), 'should have tooltip text');
// Must NOT contain line-through in inline style (CSS class no longer has it)
assert.ok(!html.includes('line-through'), 'should not contain line-through');
// Should still have hop-unreliable class for subtle styling
assert.ok(html.includes('hop-unreliable'), 'should have hop-unreliable class');
});
test('#872: reliable hop does NOT render unreliable badge', () => {
const html = hopCtx.window.HopDisplay.renderHop('CCDD', {
name: 'GoodNode', pubkey: 'pk456', unreliable: false,
ambiguous: false, conflicts: [], globalFallback: false,
}, {});
assert.ok(!html.includes('hop-unreliable-btn'), 'should not have unreliable badge');
});
}
// ===== SUMMARY =====
Promise.allSettled(pendingTests).then(() => {
console.log(`\n${'═'.repeat(40)}`);
+4 -57
View File
@@ -22,9 +22,9 @@ function assert(condition, msg) {
// ── Test nodes ──
// Two nodes share the same 1-byte prefix "ab"
const nodeA = { public_key: 'ab1111', name: 'NodeA', role: 'repeater', lat: 37.0, lon: -122.0 };
const nodeB = { public_key: 'ab2222', name: 'NodeB', role: 'repeater', lat: 38.0, lon: -123.0 };
const nodeC = { public_key: 'cd3333', name: 'NodeC', role: 'repeater', lat: 37.5, lon: -122.5 };
const nodeA = { public_key: 'ab1111', name: 'NodeA', lat: 37.0, lon: -122.0 };
const nodeB = { public_key: 'ab2222', name: 'NodeB', lat: 38.0, lon: -123.0 };
const nodeC = { public_key: 'cd3333', name: 'NodeC', lat: 37.5, lon: -122.5 };
console.log('\n=== HopResolver Affinity Tests ===\n');
@@ -88,65 +88,12 @@ assert(result5['ab'].name === 'NodeB', 'Should pick NodeB (highest affinity 0.9)
// Test 6: Unambiguous hops are not affected by affinity
console.log('\nTest 6: Unambiguous hops unaffected by affinity');
const nodeD = { public_key: 'ee4444', name: 'NodeD', role: 'repeater', lat: 36.0, lon: -121.0 };
const nodeD = { public_key: 'ee4444', name: 'NodeD', lat: 36.0, lon: -121.0 };
HopResolver.init([nodeA, nodeB, nodeC, nodeD]);
HopResolver.setAffinity({ edges: [] });
const result6 = HopResolver.resolve(['ee44'], null, null, null, null, null);
assert(result6['ee44'].name === 'NodeD', 'Unique prefix resolves directly — got: ' + result6['ee44'].name);
assert(!result6['ee44'].ambiguous, 'Should not be marked ambiguous');
// Test 7: lat=0 / lon=0 candidates are NOT excluded (equator/prime-meridian bug fix)
console.log('\nTest 7: lat=0 / lon=0 candidates are included in geo scoring');
const nodeEquator = { public_key: 'ab5555', name: 'EquatorNode', role: 'repeater', lat: 0, lon: 10 };
const nodeFar = { public_key: 'ab6666', name: 'FarNode', role: 'repeater', lat: 60, lon: 60 };
const anchorNearEq = { public_key: 'cd7777', name: 'AnchorEq', role: 'repeater', lat: 1, lon: 11 };
HopResolver.init([nodeEquator, nodeFar, anchorNearEq]);
HopResolver.setAffinity({});
// Anchor near equator — EquatorNode (0,10) should be geo-closest
const result7 = HopResolver.resolve(['cd77', 'ab'], 1.0, 11.0, null, null, null);
assert(result7['ab'].name === 'EquatorNode',
'lat=0 candidate should be included and win by geo — got: ' + result7['ab'].name);
// Test 8: lon=0 candidate is also included
console.log('\nTest 8: lon=0 candidate is included in geo scoring');
const nodePrime = { public_key: 'ab8888', name: 'PrimeMeridian', role: 'repeater', lat: 10, lon: 0 };
const anchorNearPM = { public_key: 'cd9999', name: 'AnchorPM', role: 'repeater', lat: 11, lon: 1 };
HopResolver.init([nodePrime, nodeFar, anchorNearPM]);
HopResolver.setAffinity({});
const result8 = HopResolver.resolve(['cd99', 'ab'], 11.0, 1.0, null, null, null);
assert(result8['ab'].name === 'PrimeMeridian',
'lon=0 candidate should be included and win by geo — got: ' + result8['ab'].name);
// ── Role filter tests (#935) ──
console.log('\nTest: Role filter — companions excluded from prefixIdx');
const companion = { public_key: 'ab9999', name: 'Companion1', role: 'companion', lat: 37.0, lon: -122.0 };
const sensor = { public_key: 'ab7777', name: 'Sensor1', role: 'sensor', lat: 37.0, lon: -122.0 };
const repeater = { public_key: 'ab1234', name: 'Repeater1', role: 'repeater', lat: 37.0, lon: -122.0 };
const roomSrv = { public_key: 'ff1234', name: 'RoomSrv1', role: 'room_server', lat: 37.0, lon: -122.0 };
HopResolver.init([companion, sensor, repeater, roomSrv]);
HopResolver.setAffinity({});
// Prefix 'ab' should only resolve to repeater (companion/sensor excluded)
const r1 = HopResolver.resolve(['ab12'], 0, 0, null, null, null);
assert(r1['ab12'] && r1['ab12'].name === 'Repeater1',
'prefix ab12 resolves to Repeater1 not companion — got: ' + (r1['ab12'] && r1['ab12'].name));
// Prefix 'ff' should resolve to room_server
const r2 = HopResolver.resolve(['ff12'], 0, 0, null, null, null);
assert(r2['ff12'] && r2['ff12'].name === 'RoomSrv1',
'prefix ff12 resolves to RoomSrv1 — got: ' + (r2['ff12'] && r2['ff12'].name));
// Prefix that only matches companion should return nothing
const r3 = HopResolver.resolve(['ab99'], 0, 0, null, null, null);
assert(!r3['ab99'] || !r3['ab99'].name,
'prefix ab99 (companion only) resolves to nothing — got: ' + (r3['ab99'] && r3['ab99'].name));
// pubkeyIdx should still have companion (full pubkey lookup)
console.log('\nTest: pubkeyIdx still includes all roles');
const fromServer = HopResolver.resolveFromServer(['ab99'], [companion.public_key]);
assert(fromServer['ab99'] && fromServer['ab99'].name === 'Companion1',
'resolveFromServer finds companion by full pubkey — got: ' + (fromServer['ab99'] && fromServer['ab99'].name));
console.log('\n' + (passed + failed) + ' tests, ' + passed + ' passed, ' + failed + ' failed\n');
process.exit(failed > 0 ? 1 : 0);