mirror of
https://github.com/Kpa-clawbot/meshcore-analyzer.git
synced 2026-05-13 03:24:50 +00:00
Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c8a97af737 |
@@ -1 +1 @@
|
||||
{"schemaVersion":1,"label":"e2e tests","message":"89 passed","color":"brightgreen"}
|
||||
{"schemaVersion":1,"label":"e2e tests","message":"45 passed","color":"brightgreen"}
|
||||
@@ -1 +1 @@
|
||||
{"schemaVersion":1,"label":"frontend coverage","message":"36.12%","color":"red"}
|
||||
{"schemaVersion":1,"label":"frontend coverage","message":"39.68%","color":"red"}
|
||||
+50
-154
@@ -3,15 +3,10 @@ name: CI/CD Pipeline
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
tags: ['v*']
|
||||
pull_request:
|
||||
branches: [master]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
@@ -23,8 +18,8 @@ env:
|
||||
STAGING_CONTAINER: corescope-staging-go
|
||||
|
||||
# Pipeline (sequential, fail-fast):
|
||||
# go-test → e2e-test → build-and-publish → deploy → publish-badges
|
||||
# PRs stop after build-and-publish (no GHCR push). Master continues to deploy + badges.
|
||||
# go-test → e2e-test → build → deploy → publish
|
||||
# PRs stop after build. Master continues to deploy + publish.
|
||||
|
||||
jobs:
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
@@ -68,17 +63,6 @@ jobs:
|
||||
echo "--- Go Ingestor Coverage ---"
|
||||
go tool cover -func=ingestor-coverage.out | tail -1
|
||||
|
||||
- name: Build and test channel library + decrypt CLI
|
||||
run: |
|
||||
set -e -o pipefail
|
||||
cd internal/channel
|
||||
go test ./...
|
||||
echo "--- Channel library tests passed ---"
|
||||
cd ../../cmd/decrypt
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w" -o corescope-decrypt .
|
||||
go test ./...
|
||||
echo "--- Decrypt CLI tests passed ---"
|
||||
|
||||
- name: Verify proto syntax
|
||||
run: |
|
||||
set -e
|
||||
@@ -135,7 +119,7 @@ jobs:
|
||||
e2e-test:
|
||||
name: "🎭 Playwright E2E Tests"
|
||||
needs: [go-test]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, Linux]
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
@@ -145,6 +129,13 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Free disk space
|
||||
run: |
|
||||
# Prune old runner diagnostic logs (can accumulate 50MB+)
|
||||
find ~/actions-runner/_diag/ -name '*.log' -mtime +3 -delete 2>/dev/null || true
|
||||
# Show available disk space
|
||||
df -h / | tail -1
|
||||
|
||||
- name: Set up Node.js 22
|
||||
uses: actions/setup-node@v5
|
||||
with:
|
||||
@@ -240,148 +231,54 @@ jobs:
|
||||
include-hidden-files: true
|
||||
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
# 3. Build & Publish Docker Image
|
||||
# 3. Build Docker Image
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
build-and-publish:
|
||||
name: "🏗️ Build & Publish Docker Image"
|
||||
build:
|
||||
name: "🏗️ Build Docker Image"
|
||||
needs: [e2e-test]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Compute build metadata
|
||||
id: meta
|
||||
run: |
|
||||
BUILD_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
|
||||
GIT_COMMIT="${GITHUB_SHA::7}"
|
||||
if [[ "$GITHUB_REF" == refs/tags/v* ]]; then
|
||||
APP_VERSION="${GITHUB_REF#refs/tags/}"
|
||||
else
|
||||
APP_VERSION="edge"
|
||||
fi
|
||||
echo "build_time=$BUILD_TIME" >> "$GITHUB_OUTPUT"
|
||||
echo "git_commit=$GIT_COMMIT" >> "$GITHUB_OUTPUT"
|
||||
echo "app_version=$APP_VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "Build: version=$APP_VERSION commit=$GIT_COMMIT time=$BUILD_TIME"
|
||||
|
||||
- name: Build Go Docker image (local staging)
|
||||
run: |
|
||||
GIT_COMMIT="${{ steps.meta.outputs.git_commit }}" \
|
||||
APP_VERSION="${{ steps.meta.outputs.app_version }}" \
|
||||
BUILD_TIME="${{ steps.meta.outputs.build_time }}" \
|
||||
docker compose -f "$STAGING_COMPOSE_FILE" -p corescope-staging build "$STAGING_SERVICE"
|
||||
echo "Built Go staging image ✅"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
if: github.event_name == 'push'
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Set up QEMU (arm64 runtime stage)
|
||||
if: github.event_name == 'push'
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Log in to GHCR
|
||||
if: github.event_name == 'push'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Docker metadata
|
||||
if: github.event_name == 'push'
|
||||
id: docker-meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ghcr.io/kpa-clawbot/corescope
|
||||
tags: |
|
||||
type=semver,pattern=v{{version}}
|
||||
type=semver,pattern=v{{major}}.{{minor}}
|
||||
type=semver,pattern=v{{major}}
|
||||
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
type=edge,branch=master
|
||||
|
||||
- name: Build and push to GHCR
|
||||
if: github.event_name == 'push'
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.docker-meta.outputs.tags }}
|
||||
labels: ${{ steps.docker-meta.outputs.labels }}
|
||||
build-args: |
|
||||
APP_VERSION=${{ steps.meta.outputs.app_version }}
|
||||
GIT_COMMIT=${{ steps.meta.outputs.git_commit }}
|
||||
BUILD_TIME=${{ steps.meta.outputs.build_time }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
# 4. Release Artifacts (tags only)
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
release-artifacts:
|
||||
name: "📦 Release Artifacts"
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
needs: [go-test]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: '1.22'
|
||||
|
||||
- name: Build corescope-decrypt (static, linux/amd64)
|
||||
run: |
|
||||
cd cmd/decrypt
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w -X main.version=${{ github.ref_name }}" -o ../../corescope-decrypt-linux-amd64 .
|
||||
|
||||
- name: Build corescope-decrypt (static, linux/arm64)
|
||||
run: |
|
||||
cd cmd/decrypt
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w -X main.version=${{ github.ref_name }}" -o ../../corescope-decrypt-linux-arm64 .
|
||||
|
||||
- name: Upload release assets
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: |
|
||||
corescope-decrypt-linux-amd64
|
||||
corescope-decrypt-linux-arm64
|
||||
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
# 4b. Deploy Staging (master only)
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
deploy:
|
||||
name: "🚀 Deploy Staging"
|
||||
if: github.event_name == 'push'
|
||||
needs: [build-and-publish]
|
||||
runs-on: [self-hosted, meshcore-runner-2]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Pull latest image from GHCR
|
||||
- name: Set up Node.js 22
|
||||
uses: actions/setup-node@v5
|
||||
with:
|
||||
node-version: '22'
|
||||
|
||||
- name: Free disk space
|
||||
run: |
|
||||
# Try to pull the edge image from GHCR and tag for docker-compose compatibility
|
||||
if docker pull ghcr.io/kpa-clawbot/corescope:edge; then
|
||||
docker tag ghcr.io/kpa-clawbot/corescope:edge corescope-go:latest
|
||||
echo "Pulled and tagged GHCR edge image ✅"
|
||||
else
|
||||
echo "⚠️ GHCR pull failed — falling back to locally built image"
|
||||
fi
|
||||
docker system prune -af 2>/dev/null || true
|
||||
docker builder prune -af 2>/dev/null || true
|
||||
df -h /
|
||||
|
||||
- name: Build Go Docker image
|
||||
run: |
|
||||
echo "${GITHUB_SHA::7}" > .git-commit
|
||||
APP_VERSION=$(node -p "require('./package.json').version") \
|
||||
GIT_COMMIT="${GITHUB_SHA::7}" \
|
||||
APP_VERSION=$(grep -oP 'APP_VERSION:-\K[^}]+' docker-compose.yml | head -1 || echo "3.0.0")
|
||||
GIT_COMMIT=$(git rev-parse --short HEAD)
|
||||
BUILD_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
|
||||
export APP_VERSION GIT_COMMIT BUILD_TIME
|
||||
docker compose -f "$STAGING_COMPOSE_FILE" -p corescope-staging build "$STAGING_SERVICE"
|
||||
echo "Built Go staging image ✅"
|
||||
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
# 4. Deploy Staging (master only)
|
||||
# ───────────────────────────────────────────────────────────────
|
||||
deploy:
|
||||
name: "🚀 Deploy Staging"
|
||||
if: github.event_name == 'push'
|
||||
needs: [build]
|
||||
runs-on: [self-hosted, meshcore-runner-2]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Deploy staging
|
||||
run: |
|
||||
# Force-remove the staging container regardless of how it was created
|
||||
# (compose-managed OR manually created via docker run)
|
||||
docker stop corescope-staging-go 2>/dev/null || true
|
||||
docker rm -f corescope-staging-go 2>/dev/null || true
|
||||
# Stop old container and release memory
|
||||
docker compose -f "$STAGING_COMPOSE_FILE" -p corescope-staging down --timeout 30 2>/dev/null || true
|
||||
|
||||
# Wait for container to be fully gone and OS to reclaim memory (3GB limit)
|
||||
@@ -423,11 +320,10 @@ jobs:
|
||||
|
||||
- name: Smoke test staging API
|
||||
run: |
|
||||
PORT="${STAGING_GO_HTTP_PORT:-80}"
|
||||
if curl -sf "http://localhost:${PORT}/api/stats" | grep -q engine; then
|
||||
if curl -sf http://localhost:82/api/stats | grep -q engine; then
|
||||
echo "Staging verified — engine field present ✅"
|
||||
else
|
||||
echo "Staging /api/stats did not return engine field (port ${PORT})"
|
||||
echo "Staging /api/stats did not return engine field"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -449,7 +345,7 @@ jobs:
|
||||
name: "📝 Publish Badges & Summary"
|
||||
if: github.event_name == 'push'
|
||||
needs: [deploy]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, Linux]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
@@ -0,0 +1,67 @@
|
||||
name: Publish Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ['v*']
|
||||
branches: [master]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ghcr.io/kpa-clawbot/corescope
|
||||
tags: |
|
||||
# On tag push: v1.2.3, v1.2, v1, latest
|
||||
type=semver,pattern=v{{version}}
|
||||
type=semver,pattern=v{{major}}.{{minor}}
|
||||
type=semver,pattern=v{{major}}
|
||||
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
# On master push: edge
|
||||
type=edge,branch=master
|
||||
|
||||
- name: Set build time
|
||||
id: buildtime
|
||||
run: echo "value=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Compute app version
|
||||
id: appversion
|
||||
run: |
|
||||
if [[ "${{ github.ref }}" == refs/tags/v* ]]; then
|
||||
echo "value=${{ github.ref_name }}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "value=${{ github.sha }}" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
APP_VERSION=${{ steps.appversion.outputs.value }}
|
||||
GIT_COMMIT=${{ github.sha }}
|
||||
BUILD_TIME=${{ steps.buildtime.outputs.value }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
+7
-28
@@ -1,44 +1,25 @@
|
||||
# Build stage always runs natively on the builder's arch ($BUILDPLATFORM)
|
||||
# and cross-compiles to $TARGETOS/$TARGETARCH via Go toolchain. No QEMU.
|
||||
FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS builder
|
||||
FROM golang:1.22-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache build-base
|
||||
|
||||
ARG APP_VERSION=unknown
|
||||
ARG GIT_COMMIT=unknown
|
||||
ARG BUILD_TIME=unknown
|
||||
# Provided by buildx for multi-arch builds
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
||||
# Build server (pure-Go sqlite — no CGO needed, cross-compiles cleanly)
|
||||
# Build server
|
||||
WORKDIR /build/server
|
||||
COPY cmd/server/go.mod cmd/server/go.sum ./
|
||||
COPY internal/geofilter/ ../../internal/geofilter/
|
||||
COPY internal/sigvalidate/ ../../internal/sigvalidate/
|
||||
COPY internal/packetpath/ ../../internal/packetpath/
|
||||
RUN go mod download
|
||||
COPY cmd/server/ ./
|
||||
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
|
||||
go build -ldflags "-X main.Version=${APP_VERSION} -X main.Commit=${GIT_COMMIT} -X main.BuildTime=${BUILD_TIME}" -o /corescope-server .
|
||||
RUN go build -ldflags "-X main.Version=${APP_VERSION} -X main.Commit=${GIT_COMMIT} -X main.BuildTime=${BUILD_TIME}" -o /corescope-server .
|
||||
|
||||
# Build ingestor
|
||||
WORKDIR /build/ingestor
|
||||
COPY cmd/ingestor/go.mod cmd/ingestor/go.sum ./
|
||||
COPY internal/geofilter/ ../../internal/geofilter/
|
||||
COPY internal/sigvalidate/ ../../internal/sigvalidate/
|
||||
COPY internal/packetpath/ ../../internal/packetpath/
|
||||
RUN go mod download
|
||||
COPY cmd/ingestor/ ./
|
||||
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
|
||||
go build -o /corescope-ingestor .
|
||||
|
||||
# Build decrypt CLI
|
||||
WORKDIR /build/decrypt
|
||||
COPY cmd/decrypt/go.mod cmd/decrypt/go.sum ./
|
||||
COPY internal/channel/ ../../internal/channel/
|
||||
RUN go mod download
|
||||
COPY cmd/decrypt/ ./
|
||||
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
|
||||
go build -ldflags="-s -w" -o /corescope-decrypt .
|
||||
RUN go build -o /corescope-ingestor .
|
||||
|
||||
# Runtime image
|
||||
FROM alpine:3.20
|
||||
@@ -48,7 +29,7 @@ RUN apk add --no-cache mosquitto mosquitto-clients supervisor caddy wget
|
||||
WORKDIR /app
|
||||
|
||||
# Go binaries
|
||||
COPY --from=builder /corescope-server /corescope-ingestor /corescope-decrypt /app/
|
||||
COPY --from=builder /corescope-server /corescope-ingestor /app/
|
||||
|
||||
# Frontend assets + config
|
||||
COPY public/ ./public/
|
||||
@@ -61,8 +42,6 @@ RUN echo "unknown" > .git-commit
|
||||
# Supervisor + Mosquitto + Caddy config
|
||||
COPY docker/supervisord-go.conf /etc/supervisor/conf.d/supervisord.conf
|
||||
COPY docker/supervisord-go-no-mosquitto.conf /etc/supervisor/conf.d/supervisord-no-mosquitto.conf
|
||||
COPY docker/supervisord-go-no-caddy.conf /etc/supervisor/conf.d/supervisord-no-caddy.conf
|
||||
COPY docker/supervisord-go-no-mosquitto-no-caddy.conf /etc/supervisor/conf.d/supervisord-no-mosquitto-no-caddy.conf
|
||||
COPY docker/mosquitto.conf /etc/mosquitto/mosquitto.conf
|
||||
COPY docker/Caddyfile /etc/caddy/Caddyfile
|
||||
|
||||
|
||||
@@ -40,9 +40,6 @@ RUN if [ ! -f .git-commit ]; then echo "unknown" > .git-commit; fi
|
||||
|
||||
# Supervisor + Mosquitto + Caddy config
|
||||
COPY docker/supervisord-go.conf /etc/supervisor/conf.d/supervisord.conf
|
||||
COPY docker/supervisord-go-no-mosquitto.conf /etc/supervisor/conf.d/supervisord-no-mosquitto.conf
|
||||
COPY docker/supervisord-go-no-caddy.conf /etc/supervisor/conf.d/supervisord-no-caddy.conf
|
||||
COPY docker/supervisord-go-no-mosquitto-no-caddy.conf /etc/supervisor/conf.d/supervisord-no-mosquitto-no-caddy.conf
|
||||
COPY docker/mosquitto.conf /etc/mosquitto/mosquitto.conf
|
||||
COPY docker/Caddyfile /etc/caddy/Caddyfile
|
||||
|
||||
|
||||
@@ -80,26 +80,15 @@ No build step required — just run:
|
||||
|
||||
```bash
|
||||
docker run -d --name corescope \
|
||||
--restart=unless-stopped \
|
||||
-p 80:80 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
-p 80:80 \
|
||||
-v corescope-data:/app/data \
|
||||
ghcr.io/kpa-clawbot/corescope:latest
|
||||
```
|
||||
|
||||
Open `http://localhost` — done. No config file needed; CoreScope starts with sensible defaults.
|
||||
|
||||
For HTTPS with a custom domain, add `-p 443:443` and mount your Caddyfile:
|
||||
```bash
|
||||
docker run -d --name corescope \
|
||||
--restart=unless-stopped \
|
||||
-p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
-v /your/Caddyfile:/etc/caddy/Caddyfile:ro \
|
||||
-v /your/caddy-data:/data/caddy \
|
||||
ghcr.io/kpa-clawbot/corescope:latest
|
||||
```
|
||||
|
||||
Disable built-in services with `-e DISABLE_MOSQUITTO=true` or `-e DISABLE_CADDY=true`, or drop a `.env` file in your data volume. See [docs/deployment.md](docs/deployment.md) for the full reference.
|
||||
See [DEPLOY.md](DEPLOY.md) for image tags, Docker Compose, and migration from `manage.sh`.
|
||||
See [docs/deployment.md](docs/deployment.md) for the full deployment guide — MQTT setup, HTTPS options, backups, monitoring, and troubleshooting.
|
||||
|
||||
### Build from Source
|
||||
|
||||
@@ -265,8 +254,6 @@ Contributions welcome. Please read [AGENTS.md](AGENTS.md) for coding conventions
|
||||
|
||||
**Live instance:** [analyzer.00id.net](https://analyzer.00id.net) — all API endpoints are public, no auth required.
|
||||
|
||||
**API Documentation:** CoreScope auto-generates an OpenAPI 3.0 spec. Browse the interactive Swagger UI at [`/api/docs`](https://analyzer.00id.net/api/docs) or fetch the machine-readable spec at [`/api/spec`](https://analyzer.00id.net/api/spec).
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
@@ -1,207 +0,0 @@
|
||||
# v3.6.0 - The Forensics
|
||||
|
||||
CoreScope just got eyes everywhere. This release drops **path inspection**, **color-by-hash markers**, **clock skew detection**, **full channel encryption**, an **observer graph**, and a pile of robustness fixes that make your mesh network feel like it's being watched by someone who actually cares.
|
||||
|
||||
134 commits, 105 PRs merged, 18K+ lines added. Here's what shipped.
|
||||
|
||||
---
|
||||
|
||||
## 🚀 New Features
|
||||
|
||||
### Path-Prefix Candidate Inspector (#944, #945)
|
||||
The marquee feature. Click any path segment and CoreScope opens an interactive inspector showing every candidate node that could match that hop prefix - plotted on a map with scoring by neighbor-graph affinity and geographic centroid. Ambiguous hops? Now you can see *why* they're ambiguous and pick the right one.
|
||||
|
||||
**Why you'll love it:** No more guessing which `0xA3` is the real repeater. The inspector lays out every candidate, scores them, and lets you drill in visually.
|
||||
|
||||
### Color-by-Hash Packet Markers (#948, #951)
|
||||
Every packet type gets a vivid, hash-derived color - on the live feed, map polylines, and flying-packet animations. Bright fill with dark outline for contrast. No more monochrome blobs - you can visually track packet flows by color at a glance.
|
||||
|
||||
### Node Filter on Live Page (#924, #771)
|
||||
Filter the live packet stream to show only traffic flowing through a specific node. Pick a repeater, see exactly what it's carrying. That simple.
|
||||
|
||||
### Clock Skew Detection (#746, #752, #828, #850)
|
||||
Full pipeline: backend computes drift using Theil-Sen regression with outlier rejection (#828), the UI shows per-node badges, detail sparklines, and fleet-wide analytics (#752). Bimodal clock severity (#850) surfaces flaky-RTC nodes that toggle between accurate and drifted - instead of hiding them as "No Clock."
|
||||
|
||||
**Why you'll love it:** Nodes with bad clocks silently corrupt your timeline. Now they glow red before they ruin your analysis.
|
||||
|
||||
### Observer Graph (M1+M2) (#774)
|
||||
Observers are now first-class graph citizens. CoreScope builds a neighbor graph from observation overlaps, scores hop-resolver candidates by graph edges (#876), and uses geographic centroid for tiebreaking. The observer topology is visible and queryable.
|
||||
|
||||
### Channel Encryption - Full Stack (#726, #733, #750, #760)
|
||||
Three milestones landed as one: DB-backed channel message history (#726), client-side PSK decryption in the browser (#733), and PSK channel management with add/remove UX and message caching (#750). Add a channel key in the UI, and CoreScope decrypts messages client-side - no server-side key storage. The add-channel button (#760) makes it dead simple.
|
||||
|
||||
**Why you'll love it:** Encrypted channels are no longer black boxes. Add your PSK, see the messages, search history - all without exposing keys to the server.
|
||||
|
||||
### Hash Collision Inspector (#758)
|
||||
The Hash Usage Matrix now shows collision details for all hash sizes. When two nodes share a prefix, you see exactly who collides and at what size.
|
||||
|
||||
### Geofilter Builder - In-App (#735, #900)
|
||||
The geofilter polygon builder is now served directly from CoreScope with a full docs page (#900). No more hunting for external tools. Link from the customizer, draw your polygon, done.
|
||||
|
||||
### Node Blacklist (#742)
|
||||
`nodeBlacklist` in config hides abusive or troll nodes from all views. They're gone.
|
||||
|
||||
### Observer Retention (#764)
|
||||
Stale observers are automatically pruned after a configurable number of days. Your observer list stays clean without manual intervention.
|
||||
|
||||
### Advert Signature Validation (#794)
|
||||
Corrupt packets with invalid advert signatures are now rejected at ingest. Bad data never hits your store.
|
||||
|
||||
### Bounded Cold Load (#790)
|
||||
`Load()` now respects a memory budget - no more OOM on cold start with a fat database. Combined with retention-hours cutoff (#917), cold start is safe on constrained hardware.
|
||||
|
||||
### Multi-Arch Docker Images (#869)
|
||||
Official images now publish `amd64` + `arm64` in a single multi-arch manifest. Raspberry Pi operators: pull and run. No special tags needed.
|
||||
|
||||
### /nodes Detail Panel + Search (#868)
|
||||
The nodes detail panel ships with search improvements (#862) - find nodes fast, see their full detail in a slide-out panel.
|
||||
|
||||
### Deduplicated Top Longest Hops (#848)
|
||||
Longest hops are now deduplicated by pair with observation count and SNR cues. No more seeing the same link 47 times.
|
||||
|
||||
---
|
||||
|
||||
## 🔥 Performance Wins
|
||||
|
||||
### StoreTx ResolvedPath Elimination (#806)
|
||||
The per-transaction `ResolvedPath` computation is gone - replaced by a membership index with on-demand decode. This was one of the hottest paths in the ingestor.
|
||||
|
||||
### Node Packet Queries (#803)
|
||||
Raw JSON text search for node packets replaced with a proper `byNode` index (#673). Night and day.
|
||||
|
||||
### Channel Query Performance (#762, #763)
|
||||
New `channel_hash` column enables SQL-level channel filtering. No more full-table scan to find messages in a channel.
|
||||
|
||||
### SQLite Auto-Vacuum (#919, #920)
|
||||
Incremental auto-vacuum enabled - the database file actually shrinks after retention pruning. No more 2GB database holding 200MB of live data.
|
||||
|
||||
### Retention-Hours Cutoff on Load (#917)
|
||||
`Load()` now applies `retentionHours` at read time, preventing OOM when the DB has more history than memory allows.
|
||||
|
||||
---
|
||||
|
||||
## 🛡️ Security & Robustness
|
||||
|
||||
### MQTT Reconnect with Bounded Backoff (#947, #949)
|
||||
The ingestor now reconnects to MQTT brokers with exponential backoff, observability logging, and bounded retry. No more silent disconnects that kill your data stream.
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Bugs Squashed
|
||||
|
||||
This release exterminates **40+ bugs** — from protocol-level hash mismatches to pixel-level CSS breakage. Operators told us what hurt; we listened.
|
||||
|
||||
- **Path inspector "Show on Map" missed origin and first hop** (#950) - map view now includes all hops
|
||||
- **Content hash used full header byte** (#787) - content hashing now uses payload type bits only, fixing hash collisions between packets that differ only in header flags
|
||||
- **Encrypted channel deep links showed broken UI** (#825, #826, #815) - deep links to encrypted channels now show a lock message instead of broken UI when you don't have the key
|
||||
- **Geofilter longitude wrapping** (#925) - geofilter builder wraps longitude to [-180, 180]; southern hemisphere polygons no longer invert
|
||||
- **Hash filter bypasses saved region filter** (#939) - hash lookups now skip the geo filter as intended
|
||||
- **Companion-as-repeater excluded from path hops** (#935, #936) - non-repeater nodes no longer pollute hop resolution
|
||||
- **Customize panel re-renders while typing** (#927) - text fields keep focus during config changes
|
||||
- **Per-observation raw_hex** (#881, #882) - each observer's hex dump now shows what *that observer* actually received
|
||||
- **Per-observation children in packet groups** (#866, #880) - expanded groups show per-obs data, not cross-observer aggregates
|
||||
- **Full-page obs-switch** (#866, #870) - switching observers updates hex, path, and direction correctly
|
||||
- **Packet detail shows wrong observation** (#849, #851) - clicking a specific observation opens *that* observation
|
||||
- **Byte breakdown hop count** (#844, #846) - derived from `path_len`, not aggregated `_parsedPath`
|
||||
- **Transport-route path_len offset** (#852, #853) - correct offset calculation + CSS variable fix
|
||||
- **Packets/hour chart bars + x-axis** (#858, #865) - bars render correctly, x-axis labels properly decimated
|
||||
- **Channel timeline capped to top 8** (#860, #864) - no more 47-channel chart spaghetti
|
||||
- **Reachability row opacity removed** (#859, #863) - clean rows without misleading gradient
|
||||
- **Sticky table headers on mobile** (#861, #867) - restored after regression
|
||||
- **Map popup 'Show Neighbors' on iOS Safari** (#840, #841) - link actually works now
|
||||
- **Node detail Recent Packets invisible text** (#829, #830) - CSS fix
|
||||
- **/api/packets/{hash} falls back to DB** (#827, #831) - when in-memory store misses, DB catches it
|
||||
- **IATA filter bypass for status messages** (#694, #802) - status packets no longer filtered out by airport codes
|
||||
- **Desktop node click URL hash** (#676, #739) - clicking a node updates the URL for deep linking
|
||||
- **Filter params in URL hash** (#682, #740) - all filter state serialized for shareable links
|
||||
- **Hide undecryptable channel messages** (#727, #728) - clean default view
|
||||
- **TRACE path_json uses path_sz** (#732) - correct field from flags byte, not header hash_size
|
||||
- **Multi-byte adopters** (#754, #767) - all node types, role column, advert precedence
|
||||
- **Channel key case sensitivity** (#761) - Public decode works correctly
|
||||
- **Transport route field offsets** (#766) - correct offsets in field table
|
||||
- **Clock skew sanity checks** (#769) - filter epoch-0, cap drift, require minimum samples
|
||||
- **Neighbor graph slider persistence** (#776) - default 0.7, persisted to localStorage
|
||||
- **Node detail panel navigation** (#779, #785) - Details/Analytics links actually navigate
|
||||
- **Channel key removal** (#898) - user-added keys for server-known channels can be removed
|
||||
- **Side-panel Details on desktop** (#892) - opens full-screen correctly
|
||||
- **Hex-dump byte ranges client-side** (#891) - computed from per-obs raw_hex
|
||||
- **path_json derived from raw_hex at ingest** (#886, #887) - single source of truth
|
||||
- **Path pill and byte breakdown hop agreement** (#885) - they match now
|
||||
- **Mobile close button + toolbar scroll** (#797, #805) - accessible and scrollable
|
||||
- **/health.recentPackets resolved_path fallback** (#810, #821) - falls back to longest sibling observation
|
||||
- **Channel filter on Packets page** (#812, #816) - UI and API both fixed
|
||||
- **Clock-skew section in side panel** (#813, #814) - renders correctly
|
||||
- **Real RSS in /api/stats** (#832, #835) - surface actual RSS alongside tracked store bytes
|
||||
- **Hash size detection for transport routes + zero-hop adverts** (#747) - correct detection
|
||||
- **Repeater+observer merged map marker** (#745) - single marker, not two overlapping
|
||||
|
||||
---
|
||||
|
||||
## 🎨 UI Polish
|
||||
|
||||
- QA findings applied across the board (#832, #833, #836, #837, #838) - dozens of small UX fixes from systematic QA pass
|
||||
|
||||
---
|
||||
|
||||
## 📦 Upgrading
|
||||
|
||||
```bash
|
||||
git pull
|
||||
docker compose down
|
||||
docker compose build prod
|
||||
docker compose up -d prod
|
||||
```
|
||||
|
||||
Your existing `config.json` works as-is. New optional config keys:
|
||||
- `nodeBlacklist` - array of node hashes to hide
|
||||
- `observerRetentionDays` - days before stale observers are pruned
|
||||
- `memoryBudgetMB` - cap on in-memory packet store
|
||||
|
||||
### Verify
|
||||
|
||||
```bash
|
||||
curl -s http://localhost/api/health | jq .version
|
||||
# "3.6.0"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🙏 External Contributors
|
||||
|
||||
- **#735** ([@efiten](https://github.com/efiten)) - Serve geofilter builder from app, link from customizer
|
||||
- **#739** ([@efiten](https://github.com/efiten)) - Desktop node click updates URL hash for deep linking
|
||||
- **#740** ([@efiten](https://github.com/efiten)) - Serialize filter params in URL hash for shareable links
|
||||
- **#742** ([@Joel-Claw](https://github.com/Joel-Claw)) - Add nodeBlacklist config to hide abusive/troll nodes
|
||||
- **#761** ([@copelaje](https://github.com/copelaje)) - Fix channel key case sensitivity for Public decode
|
||||
- **#764** ([@Joel-Claw](https://github.com/Joel-Claw)) - Add observer retention - prune stale observers after configurable days
|
||||
- **#802** ([@efiten](https://github.com/efiten)) - Bypass IATA filter for status messages, fill SNR on duplicate observations
|
||||
- **#803** ([@efiten](https://github.com/efiten)) - Replace raw JSON text search with byNode index for node packet queries
|
||||
- **#805** ([@efiten](https://github.com/efiten)) - Mobile close button accessible + toolbar scrollable
|
||||
- **#900** ([@efiten](https://github.com/efiten)) - App-served geofilter docs page
|
||||
- **#917** ([@efiten](https://github.com/efiten)) - Apply retentionHours cutoff in Load() to prevent OOM on cold start
|
||||
- **#924** ([@efiten](https://github.com/efiten)) - Node filter on live page - show only traffic through a specific node
|
||||
- **#925** ([@efiten](https://github.com/efiten)) - Fix geobuilder longitude wrapping for southern hemisphere polygons
|
||||
- **#927** ([@efiten](https://github.com/efiten)) - Skip customize panel re-render while text field has focus
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Breaking Changes
|
||||
|
||||
**None.** All API endpoints remain backwards-compatible. New fields are additive only.
|
||||
|
||||
---
|
||||
|
||||
## 📊 By the Numbers
|
||||
|
||||
| Stat | Count |
|
||||
|------|-------|
|
||||
| Commits | 134 |
|
||||
| PRs merged | 105 |
|
||||
| Lines added | 18,480 |
|
||||
| Lines removed | 1,632 |
|
||||
| Files changed | 110 |
|
||||
| Contributors | 4 |
|
||||
|
||||
---
|
||||
|
||||
*Previous release: [v3.5.2](https://github.com/Kpa-clawbot/CoreScope/releases/tag/v3.5.2)*
|
||||
@@ -1,142 +0,0 @@
|
||||
# corescope-decrypt
|
||||
|
||||
Standalone CLI tool to decrypt and export MeshCore hashtag channel messages from a CoreScope SQLite database.
|
||||
|
||||
## Why
|
||||
|
||||
MeshCore hashtag channels use symmetric encryption where the key is derived deterministically from the channel name. The CoreScope ingestor stores **all** `GRP_TXT` packets in the database, including those it cannot decrypt at ingest time.
|
||||
|
||||
This tool enables:
|
||||
|
||||
- **Retroactive decryption** — decrypt historical messages for any channel whose name you learn after the fact
|
||||
- **Forensics & analysis** — export channel traffic for offline review
|
||||
- **Bulk export** — dump an entire channel's history as JSON, HTML, or plain text
|
||||
|
||||
## Installation
|
||||
|
||||
### From Docker image
|
||||
|
||||
The binary is included in the CoreScope Docker image at `/app/corescope-decrypt`:
|
||||
|
||||
```bash
|
||||
docker exec corescope-prod /app/corescope-decrypt --channel "#wardriving" --db /app/data/meshcore.db
|
||||
```
|
||||
|
||||
### From GitHub release
|
||||
|
||||
Download the static binary from the [Releases](https://github.com/Kpa-clawbot/CoreScope/releases) page:
|
||||
|
||||
```bash
|
||||
# Linux amd64
|
||||
curl -LO https://github.com/Kpa-clawbot/CoreScope/releases/latest/download/corescope-decrypt-linux-amd64
|
||||
chmod +x corescope-decrypt-linux-amd64
|
||||
./corescope-decrypt-linux-amd64 --help
|
||||
```
|
||||
|
||||
### Build from source
|
||||
|
||||
```bash
|
||||
cd cmd/decrypt
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w" -o corescope-decrypt .
|
||||
```
|
||||
|
||||
The binary is statically linked — no dependencies, runs on any Linux.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
corescope-decrypt --channel NAME --db PATH [--format FORMAT] [--output FILE]
|
||||
```
|
||||
|
||||
Run `corescope-decrypt --help` for full flag documentation.
|
||||
|
||||
### JSON output (default)
|
||||
|
||||
Machine-readable, includes all metadata (observers, path hops, raw hex):
|
||||
|
||||
```bash
|
||||
corescope-decrypt --channel "#wardriving" --db meshcore.db
|
||||
```
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"hash": "a1b2c3...",
|
||||
"timestamp": "2026-04-12T17:19:09Z",
|
||||
"sender": "XMD Tag 1",
|
||||
"message": "@[MapperBot] 37.76985, -122.40525 [0.3w]",
|
||||
"channel": "#wardriving",
|
||||
"raw_hex": "150206...",
|
||||
"path": ["A3", "B0"],
|
||||
"observers": [
|
||||
{"name": "Observer1", "snr": 9.5, "rssi": -56, "timestamp": "2026-04-12T17:19:10Z"}
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### HTML output
|
||||
|
||||
Self-contained interactive viewer — search, sortable columns, expandable detail rows:
|
||||
|
||||
```bash
|
||||
corescope-decrypt --channel "#wardriving" --db meshcore.db --format html --output wardriving.html
|
||||
open wardriving.html
|
||||
```
|
||||
|
||||
No external dependencies. The JSON data is embedded directly in the HTML file.
|
||||
|
||||
### IRC / log output
|
||||
|
||||
Plain-text, one line per message — ideal for `grep`, `awk`, and piping:
|
||||
|
||||
```bash
|
||||
corescope-decrypt --channel "#wardriving" --db meshcore.db --format irc
|
||||
```
|
||||
|
||||
```
|
||||
[2026-04-12 17:19:09] <XMD Tag 1> @[MapperBot] 37.76985, -122.40525 [0.3w]
|
||||
[2026-04-12 17:20:25] <XMD Tag 1> @[MapperBot] 37.78075, -122.39774 [0.3w]
|
||||
[2026-04-12 17:25:30] <mk 🤠> @[MapperBot] 35.32444, -120.62077
|
||||
```
|
||||
|
||||
```bash
|
||||
# Find all messages from a specific sender
|
||||
corescope-decrypt --channel "#wardriving" --db meshcore.db --format irc | grep "KE6QR"
|
||||
```
|
||||
|
||||
## How channel encryption works
|
||||
|
||||
MeshCore hashtag channels derive their encryption key from the channel name:
|
||||
|
||||
1. **Key derivation**: `AES-128 key = SHA-256("#channelname")[:16]` (first 16 bytes)
|
||||
2. **Channel hash**: `SHA-256(key)[0]` — 1-byte identifier in the packet header, used for fast filtering
|
||||
3. **Encryption**: AES-128-ECB
|
||||
4. **MAC**: HMAC-SHA256 with a 32-byte secret (key + 16 zero bytes), truncated to 2 bytes
|
||||
5. **Plaintext format**: `timestamp(4 LE) + flags(1) + "sender: message\0"`
|
||||
|
||||
See the firmware source at `firmware/src/helpers/BaseChatMesh.cpp` for the canonical implementation.
|
||||
|
||||
## Testing against the fixture DB
|
||||
|
||||
```bash
|
||||
cd cmd/decrypt
|
||||
go test ./...
|
||||
|
||||
# Manual test with the real fixture:
|
||||
go run . --channel "#wardriving" --db ../../test-fixtures/e2e-fixture.db --format irc
|
||||
```
|
||||
|
||||
The shared crypto library also has independent tests:
|
||||
|
||||
```bash
|
||||
cd internal/channel
|
||||
go test -v ./...
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
- **Hashtag channels only.** Only channels where the key is derived from `SHA-256("#name")` are supported. Custom PSK channels require the raw key (not implemented).
|
||||
- **No DM decryption.** Direct messages (`TXT_MSG`) use per-peer asymmetric encryption and cannot be decrypted by this tool.
|
||||
- **Read-only.** The tool opens the database in read-only mode and never modifies it.
|
||||
- **Timestamps are UTC.** The sender's embedded timestamp is used when available, displayed in UTC.
|
||||
@@ -1,22 +0,0 @@
|
||||
module github.com/corescope/decrypt
|
||||
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/meshcore-analyzer/channel v0.0.0
|
||||
modernc.org/sqlite v1.34.5
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
golang.org/x/sys v0.22.0 // indirect
|
||||
modernc.org/libc v1.55.3 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/meshcore-analyzer/channel => ../../internal/channel
|
||||
@@ -1,43 +0,0 @@
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
|
||||
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
|
||||
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
|
||||
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
|
||||
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
|
||||
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
|
||||
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
|
||||
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
|
||||
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
|
||||
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
|
||||
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
|
||||
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
@@ -1,467 +0,0 @@
|
||||
// corescope-decrypt decrypts and exports hashtag channel messages from a CoreScope SQLite database.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// corescope-decrypt --channel "#wardriving" --db meshcore.db [--format json|html] [--output file]
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/meshcore-analyzer/channel"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// Version info (set via ldflags).
|
||||
var version = "dev"
|
||||
|
||||
// ChannelMessage is a single decrypted channel message with metadata.
// It is the unit of output for all three export formats (json, html, irc);
// the JSON field names are part of the tool's output contract.
type ChannelMessage struct {
	Hash      string     `json:"hash"`      // transmission hash from the transmissions table
	Timestamp string     `json:"timestamp"` // sender-embedded time, RFC3339, UTC
	Sender    string     `json:"sender"`    // sender name parsed from the plaintext ("" if absent)
	Message   string     `json:"message"`   // decrypted message body
	Channel   string     `json:"channel"`   // normalized channel name, always "#"-prefixed
	RawHex    string     `json:"raw_hex"`   // original raw packet hex as stored in the DB
	Path      []string   `json:"path"`      // routing hops from decoded_json, nil if unavailable
	Observers []Observer `json:"observers"` // all observations of this transmission
}
|
||||
|
||||
// Observer is a single observation of the transmission.
type Observer struct {
	Name      string  `json:"name"`      // observer node name ("" when the JOIN finds no observer row)
	SNR       float64 `json:"snr"`       // signal-to-noise ratio as stored in observations.snr — units not shown here, presumably dB; confirm against ingestor
	RSSI      float64 `json:"rssi"`      // received signal strength as stored in observations.rssi
	Timestamp string  `json:"timestamp"` // observation time, RFC3339, UTC
}
|
||||
|
||||
// main parses flags, scans every GRP_TXT packet in the database, decrypts
// those matching the requested channel, and writes the result in the chosen
// format to stdout or --output.
func main() {
	channelName := flag.String("channel", "", "Channel name (e.g. \"#wardriving\")")
	dbPath := flag.String("db", "", "Path to CoreScope SQLite database")
	format := flag.String("format", "json", "Output format: json, html, irc (or log)")
	output := flag.String("output", "", "Output file (default: stdout)")
	showVersion := flag.Bool("version", false, "Print version and exit")

	// Custom long-form usage text; replaces flag's default PrintDefaults output.
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, `corescope-decrypt — Decrypt and export MeshCore hashtag channel messages

USAGE
corescope-decrypt --channel NAME --db PATH [--format FORMAT] [--output FILE]

FLAGS
--channel NAME Channel name to decrypt (e.g. "#wardriving", "wardriving")
The "#" prefix is added automatically if missing.
--db PATH Path to a CoreScope SQLite database file (read-only access).
--format FORMAT Output format (default: json):
json — Machine-readable JSON array with full metadata
html — Self-contained HTML viewer with search and sorting
irc — Plain-text IRC-style log, one line per message
log — Alias for irc
--output FILE Write output to FILE instead of stdout.
--version Print version and exit.

EXAMPLES
# Export #wardriving messages as JSON
corescope-decrypt --channel "#wardriving" --db /app/data/meshcore.db

# Generate an interactive HTML viewer
corescope-decrypt --channel wardriving --db meshcore.db --format html --output wardriving.html

# Greppable IRC log
corescope-decrypt --channel "#MeshCore" --db meshcore.db --format irc --output meshcore.log
grep "KE6QR" meshcore.log

# From the Docker container
docker exec corescope-prod /app/corescope-decrypt --channel "#wardriving" --db /app/data/meshcore.db

RETROACTIVE DECRYPTION
MeshCore hashtag channels use symmetric encryption — the key is derived from the
channel name. The CoreScope ingestor stores ALL GRP_TXT packets in the database,
even those it cannot decrypt at ingest time. This tool lets you retroactively
decrypt messages for any channel whose name you know, even if the ingestor was
never configured with that channel's key.

This means you can recover historical messages by simply knowing the channel name.

LIMITATIONS
- Only hashtag channels (shared-secret, name-derived key) are supported.
- Direct messages (TXT_MSG) use per-peer encryption and cannot be decrypted.
- Custom PSK channels (non-hashtag) require the raw key, not a channel name.
`)
	}

	flag.Parse()

	if *showVersion {
		fmt.Println("corescope-decrypt", version)
		os.Exit(0)
	}

	// Both flags are mandatory; show usage and exit non-zero if either is missing.
	if *channelName == "" || *dbPath == "" {
		flag.Usage()
		os.Exit(1)
	}

	// Normalize channel name: key derivation expects the "#" prefix.
	ch := *channelName
	if !strings.HasPrefix(ch, "#") {
		ch = "#" + ch
	}

	// Key is derived deterministically from the channel name; chHash is the
	// 1-byte channel identifier used to pre-filter packets before decrypting.
	key := channel.DeriveKey(ch)
	chHash := channel.ChannelHash(key)

	// mode=ro: the tool never modifies the database.
	db, err := sql.Open("sqlite", *dbPath+"?mode=ro")
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	defer db.Close()

	// Query all GRP_TXT packets (payload_type 5), regardless of channel.
	rows, err := db.Query(`SELECT id, hash, raw_hex, first_seen FROM transmissions WHERE payload_type = 5`)
	if err != nil {
		log.Fatalf("Query failed: %v", err)
	}
	defer rows.Close()

	var messages []ChannelMessage
	decrypted, total := 0, 0

	for rows.Next() {
		var id int
		var txHash, rawHex, firstSeen string
		// NOTE(review): firstSeen is scanned but never used below — the
		// sender-embedded timestamp is preferred; consider dropping it from
		// the SELECT or using it as a fallback.
		if err := rows.Scan(&id, &txHash, &rawHex, &firstSeen); err != nil {
			log.Printf("Scan error: %v", err)
			continue
		}
		total++

		payload, err := extractGRPPayload(rawHex)
		if err != nil {
			continue
		}
		// Minimum payload: channel hash (1) + MAC (2).
		if len(payload) < 3 {
			continue
		}

		// Check channel hash byte — cheap filter before attempting decryption.
		if payload[0] != chHash {
			continue
		}

		mac := payload[1:3]
		ciphertext := payload[3:]
		if len(ciphertext) < 5 || len(ciphertext)%16 != 0 {
			// Pad ciphertext to block boundary for decryption attempt
			if len(ciphertext) < 16 {
				continue
			}
			// Truncate to block boundary
			ciphertext = ciphertext[:len(ciphertext)/16*16]
		}

		// Decrypt verifies the truncated MAC; !ok means wrong channel or corrupt data.
		plaintext, ok := channel.Decrypt(key, mac, ciphertext)
		if !ok {
			continue
		}

		ts, sender, msg, err := channel.ParsePlaintext(plaintext)
		if err != nil {
			continue
		}

		decrypted++

		// Convert MeshCore timestamp (seconds) to RFC3339 UTC.
		timestamp := time.Unix(int64(ts), 0).UTC().Format(time.RFC3339)

		// Get path from decoded_json
		path := getPathFromDB(db, id)

		// Get observers
		observers := getObservers(db, id)

		messages = append(messages, ChannelMessage{
			Hash:      txHash,
			Timestamp: timestamp,
			Sender:    sender,
			Message:   msg,
			Channel:   ch,
			RawHex:    rawHex,
			Path:      path,
			Observers: observers,
		})
	}

	// Sort by timestamp (RFC3339 strings sort chronologically).
	sort.Slice(messages, func(i, j int) bool {
		return messages[i].Timestamp < messages[j].Timestamp
	})

	// Progress goes to stderr via log so stdout stays clean for piped output.
	log.Printf("Scanned %d GRP_TXT packets, decrypted %d for channel %s", total, decrypted, ch)

	// Generate output
	var out []byte
	switch *format {
	case "json":
		out, err = json.MarshalIndent(messages, "", " ")
		if err != nil {
			log.Fatalf("JSON marshal: %v", err)
		}
		out = append(out, '\n')
	case "html":
		out = renderHTML(messages, ch)
	case "irc", "log":
		out = renderIRC(messages)
	default:
		log.Fatalf("Unknown format: %s (use json, html, irc, or log)", *format)
	}

	if *output != "" {
		if err := os.WriteFile(*output, out, 0644); err != nil {
			log.Fatalf("Write file: %v", err)
		}
		log.Printf("Written to %s", *output)
	} else {
		os.Stdout.Write(out)
	}
}
|
||||
|
||||
// extractGRPPayload parses a raw hex packet and returns the GRP_TXT payload bytes.
//
// Packet layout: header(1) [+ 4 transport-code bytes on transport routes]
// + path-length byte + path hashes + payload. An error is returned for
// malformed hex, non-GRP_TXT packets, or packets truncated before the payload.
func extractGRPPayload(rawHex string) ([]byte, error) {
	pkt, err := hex.DecodeString(strings.TrimSpace(rawHex))
	if err != nil || len(pkt) < 2 {
		return nil, fmt.Errorf("invalid hex")
	}

	// Payload type occupies bits 2-5 of the header byte; GRP_TXT is type 5.
	hdr := pkt[0]
	if (hdr>>2)&0x0F != 5 {
		return nil, fmt.Errorf("not GRP_TXT")
	}

	pos := 1
	// Route types 0 and 3 carry two 2-byte transport codes before the path byte.
	switch hdr & 0x03 {
	case 0, 3:
		pos += 4
	}

	if pos >= len(pkt) {
		return nil, fmt.Errorf("too short for path")
	}

	// Path byte: top 2 bits encode hash size minus one, low 6 bits the hop count.
	pb := pkt[pos]
	pos++
	pos += (int(pb>>6) + 1) * int(pb&0x3F)

	if pos >= len(pkt) {
		return nil, fmt.Errorf("too short for payload")
	}

	return pkt[pos:], nil
}
|
||||
|
||||
// getPathFromDB returns the routing hops recorded in the transmission's
// decoded_json column, or nil when the column is NULL, the query fails, or
// the JSON has no parsable path information.
func getPathFromDB(db *sql.DB, txID int) []string {
	var raw sql.NullString
	if err := db.QueryRow(`SELECT decoded_json FROM transmissions WHERE id = ?`, txID).Scan(&raw); err != nil || !raw.Valid {
		return nil
	}

	// Only the path.hops field is of interest; everything else is ignored.
	var doc struct {
		Path struct {
			Hops []string `json:"hops"`
		} `json:"path"`
	}
	if err := json.Unmarshal([]byte(raw.String), &doc); err != nil {
		return nil
	}
	return doc.Path.Hops
}
|
||||
|
||||
func getObservers(db *sql.DB, txID int) []Observer {
|
||||
rows, err := db.Query(`
|
||||
SELECT o.name, obs.snr, obs.rssi, obs.timestamp
|
||||
FROM observations obs
|
||||
LEFT JOIN observers o ON o.id = CAST(obs.observer_idx AS TEXT)
|
||||
WHERE obs.transmission_id = ?
|
||||
ORDER BY obs.timestamp
|
||||
`, txID)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var observers []Observer
|
||||
for rows.Next() {
|
||||
var name sql.NullString
|
||||
var snr, rssi sql.NullFloat64
|
||||
var ts int64
|
||||
if err := rows.Scan(&name, &snr, &rssi, &ts); err != nil {
|
||||
continue
|
||||
}
|
||||
obs := Observer{
|
||||
Timestamp: time.Unix(ts, 0).UTC().Format(time.RFC3339),
|
||||
}
|
||||
if name.Valid {
|
||||
obs.Name = name.String
|
||||
}
|
||||
if snr.Valid {
|
||||
obs.SNR = snr.Float64
|
||||
}
|
||||
if rssi.Valid {
|
||||
obs.RSSI = rssi.Float64
|
||||
}
|
||||
observers = append(observers, obs)
|
||||
}
|
||||
return observers
|
||||
}
|
||||
|
||||
func renderIRC(messages []ChannelMessage) []byte {
|
||||
var b strings.Builder
|
||||
for _, m := range messages {
|
||||
sender := m.Sender
|
||||
if sender == "" {
|
||||
sender = "???"
|
||||
}
|
||||
// Parse RFC3339 timestamp into a compact format
|
||||
t, err := time.Parse(time.RFC3339, m.Timestamp)
|
||||
if err != nil {
|
||||
b.WriteString(fmt.Sprintf("[%s] <%s> %s\n", m.Timestamp, sender, m.Message))
|
||||
continue
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("[%s] <%s> %s\n", t.Format("2006-01-02 15:04:05"), sender, m.Message))
|
||||
}
|
||||
return []byte(b.String())
|
||||
}
|
||||
|
||||
// renderHTML renders messages as a single self-contained HTML page with
// client-side search, sortable columns, and expandable detail rows. The full
// message set is embedded as a JSON literal; no external assets are loaded.
//
// NOTE(review): jsonData is spliced verbatim inside a <script> block — a
// message containing the text "</script>" could terminate the script early
// (markup breakage / script injection). Consider escaping "<" in the JSON.
// Observation only; behavior is unchanged here.
func renderHTML(messages []ChannelMessage, channelName string) []byte {
	// Marshal error ignored: the same structs were already marshaled by the
	// json output path, so failure is not expected here.
	jsonData, _ := json.Marshal(messages)

	var b strings.Builder
	b.WriteString(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>CoreScope Channel Export — ` + html.EscapeString(channelName) + `</title>
<style>
*{box-sizing:border-box;margin:0;padding:0}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,sans-serif;background:#0d1117;color:#c9d1d9;padding:20px}
h1{color:#58a6ff;margin-bottom:16px;font-size:1.5em}
.stats{color:#8b949e;margin-bottom:16px;font-size:0.9em}
input[type=text]{width:100%;max-width:500px;padding:8px 12px;background:#161b22;border:1px solid #30363d;border-radius:6px;color:#c9d1d9;font-size:14px;margin-bottom:16px}
input[type=text]:focus{outline:none;border-color:#58a6ff}
table{width:100%;border-collapse:collapse;font-size:14px}
th{background:#161b22;color:#8b949e;text-align:left;padding:8px 12px;border-bottom:2px solid #30363d;cursor:pointer;user-select:none;white-space:nowrap}
th:hover{color:#58a6ff}
th.sorted-asc::after{content:" ▲"}
th.sorted-desc::after{content:" ▼"}
td{padding:8px 12px;border-bottom:1px solid #21262d;vertical-align:top}
tr:hover{background:#161b22}
tr.expanded{background:#161b22}
.detail-row td{padding:12px 24px;background:#0d1117;border-bottom:1px solid #21262d}
.detail-row pre{background:#161b22;padding:12px;border-radius:6px;overflow-x:auto;font-size:12px;color:#8b949e}
.detail-row .label{color:#58a6ff;font-weight:600;margin-top:8px;display:block}
.observer-tag{display:inline-block;background:#1f6feb22;color:#58a6ff;padding:2px 8px;border-radius:4px;margin:2px;font-size:12px}
.no-results{color:#8b949e;text-align:center;padding:40px;font-size:16px}
.sender{color:#d2a8ff;font-weight:600}
.timestamp{color:#8b949e;font-family:monospace;font-size:12px}
</style>
</head>
<body>
<h1>` + html.EscapeString(channelName) + ` — Channel Messages</h1>
<div class="stats" id="stats"></div>
<input type="text" id="search" placeholder="Search messages..." autocomplete="off">
<table>
<thead>
<tr>
<th data-col="timestamp">Timestamp</th>
<th data-col="sender">Sender</th>
<th data-col="message">Message</th>
<th data-col="observers">Observers</th>
</tr>
</thead>
<tbody id="tbody"></tbody>
</table>
<div class="no-results" id="no-results" style="display:none">No matching messages</div>
<script>
var DATA=` + string(jsonData) + `;
var sortCol="timestamp",sortAsc=true,expandedHash=null;
function init(){
document.getElementById("stats").textContent=DATA.length+" messages";
document.getElementById("search").addEventListener("input",render);
document.querySelectorAll("th[data-col]").forEach(function(th){
th.addEventListener("click",function(){
var col=th.dataset.col;
if(sortCol===col)sortAsc=!sortAsc;
else{sortCol=col;sortAsc=true}
render();
});
});
render();
}
function render(){
var q=document.getElementById("search").value.toLowerCase();
var filtered=DATA.filter(function(m){
if(!q)return true;
return(m.message||"").toLowerCase().indexOf(q)>=0||(m.sender||"").toLowerCase().indexOf(q)>=0;
});
filtered.sort(function(a,b){
var va=a[sortCol]||"",vb=b[sortCol]||"";
if(sortCol==="observers"){va=a.observers?a.observers.length:0;vb=b.observers?b.observers.length:0}
if(va<vb)return sortAsc?-1:1;
if(va>vb)return sortAsc?1:-1;
return 0;
});
document.querySelectorAll("th[data-col]").forEach(function(th){
th.className=th.dataset.col===sortCol?(sortAsc?"sorted-asc":"sorted-desc"):"";
});
var tb=document.getElementById("tbody");
tb.innerHTML="";
document.getElementById("no-results").style.display=filtered.length?"none":"block";
filtered.forEach(function(m){
var tr=document.createElement("tr");
tr.innerHTML='<td class="timestamp">'+esc(m.timestamp)+'</td><td class="sender">'+esc(m.sender||"—")+'</td><td>'+esc(m.message)+'</td><td>'+
(m.observers?m.observers.map(function(o){return'<span class="observer-tag">'+esc(o.name||"?")+" SNR:"+o.snr.toFixed(1)+'</span>'}).join(""):"—")+'</td>';
tr.style.cursor="pointer";
tr.addEventListener("click",function(){
expandedHash=expandedHash===m.hash?null:m.hash;
render();
});
tb.appendChild(tr);
if(expandedHash===m.hash){
tr.className="expanded";
var dr=document.createElement("tr");
dr.className="detail-row";
dr.innerHTML='<td colspan="4"><span class="label">Hash</span><pre>'+esc(m.hash)+'</pre>'+
'<span class="label">Raw Hex</span><pre>'+esc(m.raw_hex)+'</pre>'+
(m.path&&m.path.length?'<span class="label">Path</span><pre>'+esc(m.path.join(" → "))+'</pre>':'')+
'<span class="label">Observers</span><pre>'+esc(JSON.stringify(m.observers,null,2))+'</pre></td>';
tb.appendChild(dr);
}
});
}
function esc(s){var d=document.createElement("div");d.textContent=s;return d.innerHTML}
init();
</script>
</body>
</html>`)

	return []byte(b.String())
}
|
||||
@@ -1,129 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/meshcore-analyzer/channel"
|
||||
)
|
||||
|
||||
// TestExtractGRPPayload verifies extraction from a minimal flood-routed
// GRP_TXT packet with an empty path: the returned slice should start at the
// channel-hash byte.
func TestExtractGRPPayload(t *testing.T) {
	// Build a minimal GRP_TXT packet: header(1) + path(1) + payload
	// header: route=FLOOD(1), payload=GRP_TXT(5), version=0 → (5<<2)|1 = 0x15
	// path: 0 hops, hash_size=1 → 0x00
	payload := []byte{0x81, 0x12, 0x34} // channel_hash + mac + data
	pkt := append([]byte{0x15, 0x00}, payload...)
	rawHex := hex.EncodeToString(pkt)

	result, err := extractGRPPayload(rawHex)
	if err != nil {
		t.Fatal(err)
	}
	if len(result) != 3 || result[0] != 0x81 {
		t.Fatalf("payload mismatch: %x", result)
	}
}
|
||||
|
||||
// TestExtractGRPPayloadTransport verifies that the 4 transport-code bytes of
// a transport-routed packet (route type 0) are skipped before the path byte.
func TestExtractGRPPayloadTransport(t *testing.T) {
	// Transport flood: route=0, 4 bytes transport codes BEFORE path byte
	// header: (5<<2)|0 = 0x14
	payload := []byte{0xAA, 0xBB, 0xCC}
	// header + 4 transport bytes + path(0 hops) + payload
	pkt := append([]byte{0x14, 0xFF, 0xFF, 0xFF, 0xFF, 0x00}, payload...)
	rawHex := hex.EncodeToString(pkt)

	result, err := extractGRPPayload(rawHex)
	if err != nil {
		t.Fatal(err)
	}
	if result[0] != 0xAA {
		t.Fatalf("expected AA, got %02X", result[0])
	}
}
|
||||
|
||||
// TestExtractGRPPayloadNotGRP verifies that packets with a non-GRP_TXT
// payload type are rejected with an error.
func TestExtractGRPPayloadNotGRP(t *testing.T) {
	// payload type = ADVERT (4): (4<<2)|1 = 0x11
	rawHex := hex.EncodeToString([]byte{0x11, 0x00, 0x01, 0x02})
	_, err := extractGRPPayload(rawHex)
	if err == nil {
		t.Fatal("expected error for non-GRP_TXT")
	}
}
|
||||
|
||||
// TestKeyDerivationConsistency pins the shared library's key derivation to a
// known-good value: "#wardriving" must yield a 16-byte AES key whose channel
// hash byte is 0x81, matching the fixture data the ingestor produced.
func TestKeyDerivationConsistency(t *testing.T) {
	// Verify key derivation matches what the ingestor expects
	key := channel.DeriveKey("#wardriving")
	if len(key) != 16 {
		t.Fatalf("key len %d", len(key))
	}
	ch := channel.ChannelHash(key)
	if ch != 0x81 {
		// We know from fixture data that #wardriving has channelHashHex "81"
		t.Fatalf("channel hash %02X, expected 81", ch)
	}
}
|
||||
|
||||
// TestRenderIRC verifies IRC log formatting: compact timestamps and the "???"
// placeholder for messages without a sender.
func TestRenderIRC(t *testing.T) {
	msgs := []ChannelMessage{
		{Timestamp: "2026-04-12T03:45:12Z", Sender: "NodeA", Message: "Hello"},
		{Timestamp: "2026-04-12T03:46:01Z", Sender: "", Message: "No sender"},
	}
	out := string(renderIRC(msgs))
	if !strings.Contains(out, "[2026-04-12 03:45:12] <NodeA> Hello") {
		t.Fatalf("IRC output missing expected line: %s", out)
	}
	if !strings.Contains(out, "<???> No sender") {
		t.Fatalf("IRC output should use ??? for empty sender: %s", out)
	}
}
|
||||
|
||||
// TestRenderHTMLValid sanity-checks the HTML export: document structure is
// present and the channel name appears in the output.
func TestRenderHTMLValid(t *testing.T) {
	msgs := []ChannelMessage{
		{Hash: "abc", Timestamp: "2026-04-12T00:00:00Z", Sender: "X", Message: "test", Channel: "#test"},
	}
	out := string(renderHTML(msgs, "#test"))
	if !strings.Contains(out, "<!DOCTYPE html>") {
		t.Fatal("not valid HTML")
	}
	if !strings.Contains(out, "#test") {
		t.Fatal("channel name missing")
	}
	if !strings.Contains(out, "</html>") {
		t.Fatal("HTML not closed")
	}
}
|
||||
|
||||
// TestJSONOutputParseable verifies the JSON export round-trips: MarshalIndent
// output unmarshals back into an equal ChannelMessage slice.
func TestJSONOutputParseable(t *testing.T) {
	msgs := []ChannelMessage{
		{Hash: "abc", Timestamp: "2026-04-12T00:00:00Z", Sender: "X", Message: "hi", Channel: "#test"},
	}
	data, err := json.MarshalIndent(msgs, "", " ")
	if err != nil {
		t.Fatal(err)
	}
	var parsed []ChannelMessage
	if err := json.Unmarshal(data, &parsed); err != nil {
		t.Fatalf("JSON not parseable: %v", err)
	}
	if len(parsed) != 1 || parsed[0].Sender != "X" {
		t.Fatalf("parsed mismatch: %+v", parsed)
	}
}
|
||||
|
||||
// Integration test against fixture DB (skipped if DB not found)
|
||||
// TestFixtureDecrypt is an integration test gated on the presence of the e2e
// fixture database. It currently only re-checks the #wardriving channel hash;
// it does not actually open or decrypt from the fixture DB.
func TestFixtureDecrypt(t *testing.T) {
	dbPath := "../../test-fixtures/e2e-fixture.db"
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		t.Skip("fixture DB not found")
	}

	// We know the fixture has #wardriving messages with channelHash 0x81
	key := channel.DeriveKey("#wardriving")
	ch := channel.ChannelHash(key)
	if ch != 0x81 {
		t.Fatalf("unexpected channel hash: %02X", ch)
	}
}
|
||||
+3
-37
@@ -39,9 +39,7 @@ type Config struct {
|
||||
HashChannels []string `json:"hashChannels,omitempty"`
|
||||
Retention *RetentionConfig `json:"retention,omitempty"`
|
||||
Metrics *MetricsConfig `json:"metrics,omitempty"`
|
||||
GeoFilter *GeoFilterConfig `json:"geo_filter,omitempty"`
|
||||
ValidateSignatures *bool `json:"validateSignatures,omitempty"`
|
||||
DB *DBConfig `json:"db,omitempty"`
|
||||
GeoFilter *GeoFilterConfig `json:"geo_filter,omitempty"`
|
||||
}
|
||||
|
||||
// GeoFilterConfig is an alias for the shared geofilter.Config type.
|
||||
@@ -49,9 +47,8 @@ type GeoFilterConfig = geofilter.Config
|
||||
|
||||
// RetentionConfig controls how long stale nodes are kept before being moved to inactive_nodes.
|
||||
type RetentionConfig struct {
|
||||
NodeDays int `json:"nodeDays"`
|
||||
ObserverDays int `json:"observerDays"`
|
||||
MetricsDays int `json:"metricsDays"`
|
||||
NodeDays int `json:"nodeDays"`
|
||||
MetricsDays int `json:"metricsDays"`
|
||||
}
|
||||
|
||||
// MetricsConfig controls observer metrics collection.
|
||||
@@ -59,28 +56,6 @@ type MetricsConfig struct {
|
||||
SampleIntervalSec int `json:"sampleIntervalSec"`
|
||||
}
|
||||
|
||||
// DBConfig controls SQLite vacuum and maintenance behavior (#919).
|
||||
type DBConfig struct {
|
||||
VacuumOnStartup bool `json:"vacuumOnStartup"` // one-time full VACUUM on startup if auto_vacuum is not INCREMENTAL
|
||||
IncrementalVacuumPages int `json:"incrementalVacuumPages"` // pages returned to OS per reaper cycle (default 1024)
|
||||
}
|
||||
|
||||
// IncrementalVacuumPages returns the configured pages per vacuum or 1024 default.
|
||||
func (c *Config) IncrementalVacuumPages() int {
|
||||
if c.DB != nil && c.DB.IncrementalVacuumPages > 0 {
|
||||
return c.DB.IncrementalVacuumPages
|
||||
}
|
||||
return 1024
|
||||
}
|
||||
|
||||
// ShouldValidateSignatures returns true (default) unless explicitly disabled.
|
||||
func (c *Config) ShouldValidateSignatures() bool {
|
||||
if c.ValidateSignatures != nil {
|
||||
return *c.ValidateSignatures
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// MetricsSampleInterval returns the configured sample interval or 300s default.
|
||||
func (c *Config) MetricsSampleInterval() int {
|
||||
if c.Metrics != nil && c.Metrics.SampleIntervalSec > 0 {
|
||||
@@ -105,15 +80,6 @@ func (c *Config) NodeDaysOrDefault() int {
|
||||
return 7
|
||||
}
|
||||
|
||||
// ObserverDaysOrDefault returns the configured retention.observerDays or 14 if not set.
|
||||
// A value of -1 means observers are never removed.
|
||||
func (c *Config) ObserverDaysOrDefault() int {
|
||||
if c.Retention != nil && c.Retention.ObserverDays != 0 {
|
||||
return c.Retention.ObserverDays
|
||||
}
|
||||
return 14
|
||||
}
|
||||
|
||||
// LoadConfig reads configuration from a JSON file, with env var overrides.
|
||||
// If the config file does not exist, sensible defaults are used (zero-config startup).
|
||||
func LoadConfig(path string) (*Config, error) {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// hmacSHA256 computes HMAC-SHA256 for test use.
|
||||
@@ -158,7 +157,7 @@ func TestHandleMessageChannelMessage(t *testing.T) {
|
||||
payload := []byte(`{"text":"Alice: Hello everyone","channel_idx":3,"SNR":5.0,"RSSI":-95,"score":10,"direction":"rx","sender_timestamp":1700000000}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/2", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -204,13 +203,21 @@ func TestHandleMessageChannelMessage(t *testing.T) {
|
||||
t.Errorf("direction=%v, want rx", direction)
|
||||
}
|
||||
|
||||
// Sender node should NOT be created (see issue #665: synthetic "sender-" keys
|
||||
// are unreachable from the claiming/health flow)
|
||||
// Should create sender node
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&count); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Errorf("nodes count=%d, want 0 (no phantom sender node)", count)
|
||||
if count != 1 {
|
||||
t.Errorf("nodes count=%d, want 1 (sender node)", count)
|
||||
}
|
||||
|
||||
// Verify sender node name
|
||||
var nodeName string
|
||||
if err := store.db.QueryRow("SELECT name FROM nodes LIMIT 1").Scan(&nodeName); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nodeName != "Alice" {
|
||||
t.Errorf("node name=%s, want Alice", nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -218,7 +225,7 @@ func TestHandleMessageChannelMessageEmptyText(t *testing.T) {
|
||||
store, source := newTestContext(t)
|
||||
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/1", payload: []byte(`{"text":""}`)}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -233,7 +240,7 @@ func TestHandleMessageChannelNoSender(t *testing.T) {
|
||||
store, source := newTestContext(t)
|
||||
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/1", payload: []byte(`{"text":"no sender here"}`)}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&count); err != nil {
|
||||
@@ -250,7 +257,7 @@ func TestHandleMessageDirectMessage(t *testing.T) {
|
||||
payload := []byte(`{"text":"Bob: Hey there","sender_timestamp":1700000000,"SNR":3.0,"rssi":-100,"Score":8,"Direction":"tx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/abc123", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -294,7 +301,7 @@ func TestHandleMessageDirectMessageEmptyText(t *testing.T) {
|
||||
store, source := newTestContext(t)
|
||||
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/abc", payload: []byte(`{"text":""}`)}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -309,7 +316,7 @@ func TestHandleMessageDirectNoSender(t *testing.T) {
|
||||
store, source := newTestContext(t)
|
||||
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/xyz", payload: []byte(`{"text":"message with no colon"}`)}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -328,7 +335,7 @@ func TestHandleMessageUppercaseScoreDirection(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `","Score":9.0,"Direction":"tx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var score *float64
|
||||
var direction *string
|
||||
@@ -349,7 +356,7 @@ func TestHandleMessageChannelLowercaseFields(t *testing.T) {
|
||||
|
||||
payload := []byte(`{"text":"Test: msg","snr":3.0,"rssi":-90,"Score":5,"Direction":"rx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/0", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -365,7 +372,7 @@ func TestHandleMessageDirectLowercaseFields(t *testing.T) {
|
||||
|
||||
payload := []byte(`{"text":"Test: msg","snr":2.0,"rssi":-85,"score":7,"direction":"tx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/xyz", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -388,7 +395,7 @@ func TestHandleMessageAdvertWithTelemetry(t *testing.T) {
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
// Should have created transmission, node, and observer
|
||||
var txCount, nodeCount, obsCount int
|
||||
@@ -428,7 +435,7 @@ func TestHandleMessageAdvertGeoFiltered(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{GeoFilter: gf})
|
||||
handleMessage(store, "test", source, msg, nil, gf)
|
||||
|
||||
// Geo-filtered adverts should not create nodes
|
||||
var nodeCount int
|
||||
@@ -454,7 +461,7 @@ func TestDecodeAdvertLocationTruncated(t *testing.T) {
|
||||
buf[100] = 0x11
|
||||
// Only 4 bytes after flags — not enough for full location (needs 8)
|
||||
|
||||
p := decodeAdvert(buf[:105], false)
|
||||
p := decodeAdvert(buf[:105])
|
||||
if p.Error != "" {
|
||||
t.Fatalf("error: %s", p.Error)
|
||||
}
|
||||
@@ -476,7 +483,7 @@ func TestDecodeAdvertFeat1Truncated(t *testing.T) {
|
||||
buf[100] = 0x21
|
||||
// Only 1 byte after flags — not enough for feat1 (needs 2)
|
||||
|
||||
p := decodeAdvert(buf[:102], false)
|
||||
p := decodeAdvert(buf[:102])
|
||||
if p.Feat1 != nil {
|
||||
t.Error("feat1 should be nil with truncated data")
|
||||
}
|
||||
@@ -497,7 +504,7 @@ func TestDecodeAdvertFeat2Truncated(t *testing.T) {
|
||||
buf[102] = 0x00
|
||||
// Only 1 byte left — not enough for feat2
|
||||
|
||||
p := decodeAdvert(buf[:104], false)
|
||||
p := decodeAdvert(buf[:104])
|
||||
if p.Feat1 == nil {
|
||||
t.Error("feat1 should be set")
|
||||
}
|
||||
@@ -537,7 +544,7 @@ func TestDecodeAdvertSensorBadTelemetry(t *testing.T) {
|
||||
buf[105] = 0x20
|
||||
buf[106] = 0x4E
|
||||
|
||||
p := decodeAdvert(buf[:107], false)
|
||||
p := decodeAdvert(buf[:107])
|
||||
if p.BatteryMv != nil {
|
||||
t.Error("battery_mv=0 should be nil")
|
||||
}
|
||||
@@ -665,7 +672,7 @@ func TestHandleMessageCorruptedAdvertNoNode(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&count); err != nil {
|
||||
@@ -687,7 +694,7 @@ func TestHandleMessageNonAdvertPacket(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -733,7 +740,7 @@ func TestDecodeAdvertSensorNoName(t *testing.T) {
|
||||
buf[103] = 0xC4
|
||||
buf[104] = 0x09
|
||||
|
||||
p := decodeAdvert(buf[:105], false)
|
||||
p := decodeAdvert(buf[:105])
|
||||
if p.Error != "" {
|
||||
t.Fatalf("error: %s", p.Error)
|
||||
}
|
||||
@@ -828,7 +835,7 @@ func TestDecodePacketNoPathByteAfterHeader(t *testing.T) {
|
||||
// Non-transport route, but only header byte (no path byte)
|
||||
// Actually 0A alone = 1 byte, but we need >= 2
|
||||
// Header + exactly at offset boundary
|
||||
_, err := DecodePacket("0A", nil, false)
|
||||
_, err := DecodePacket("0A", nil)
|
||||
if err == nil {
|
||||
t.Error("should error - too short")
|
||||
}
|
||||
@@ -849,7 +856,7 @@ func TestDecodeAdvertNameNoNull(t *testing.T) {
|
||||
// Name without null terminator — goes to end of buffer
|
||||
copy(buf[101:], []byte("LongNameNoNull"))
|
||||
|
||||
p := decodeAdvert(buf[:115], false)
|
||||
p := decodeAdvert(buf[:115])
|
||||
if p.Name != "LongNameNoNull" {
|
||||
t.Errorf("name=%q, want LongNameNoNull", p.Name)
|
||||
}
|
||||
@@ -864,7 +871,7 @@ func TestHandleMessageChannelLongSender(t *testing.T) {
|
||||
longText := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: msg"
|
||||
payload := []byte(`{"text":"` + longText + `"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/1", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&count); err != nil {
|
||||
@@ -883,7 +890,7 @@ func TestHandleMessageDirectLongSender(t *testing.T) {
|
||||
longText := "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB: msg"
|
||||
payload := []byte(`{"text":"` + longText + `"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/abc", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -900,7 +907,7 @@ func TestHandleMessageDirectUppercaseScoreDirection(t *testing.T) {
|
||||
|
||||
payload := []byte(`{"text":"X: hi","Score":6,"Direction":"rx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/direct/d1", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -930,7 +937,7 @@ func TestHandleMessageChannelUppercaseScoreDirection(t *testing.T) {
|
||||
|
||||
payload := []byte(`{"text":"Y: hi","Score":4,"Direction":"tx"}`)
|
||||
msg := &mockMessage{topic: "meshcore/message/channel/5", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count); err != nil {
|
||||
@@ -961,7 +968,7 @@ func TestHandleMessageRawLowercaseScore(t *testing.T) {
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
payload := []byte(`{"raw":"` + rawHex + `","score":3.5}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var score *float64
|
||||
if err := store.db.QueryRow("SELECT score FROM observations LIMIT 1").Scan(&score); err != nil {
|
||||
@@ -980,7 +987,7 @@ func TestHandleMessageStatusNoOrigin(t *testing.T) {
|
||||
topic: "meshcore/LAX/obs5/status",
|
||||
payload: []byte(`{"model":"L1"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM observers WHERE id = 'obs5'").Scan(&count); err != nil {
|
||||
@@ -1139,182 +1146,3 @@ func TestDecodeTraceWithPath(t *testing.T) {
|
||||
t.Errorf("flags=%v, want 3", p.TraceFlags)
|
||||
}
|
||||
}
|
||||
|
||||
// --- db.go: RemoveStaleObservers (soft-delete) ---
|
||||
|
||||
func TestRemoveStaleObservers(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
|
||||
// Insert an observer with last_seen 30 days ago
|
||||
err := store.UpsertObserver("obs-old", "OldObserver", "LAX", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Override last_seen to 30 days ago
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -30).Format(time.RFC3339)
|
||||
_, err = store.db.Exec("UPDATE observers SET last_seen = ? WHERE id = ?", cutoff, "obs-old")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Insert a recent observer
|
||||
err = store.UpsertObserver("obs-new", "NewObserver", "NYC", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
removed, err := store.RemoveStaleObservers(14)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if removed != 1 {
|
||||
t.Errorf("removed=%d, want 1", removed)
|
||||
}
|
||||
|
||||
// Observer should still be in the table (soft-delete), but marked inactive
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM observers").Scan(&count); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Errorf("observers count=%d, want 2 (soft-delete preserves row)", count)
|
||||
}
|
||||
|
||||
// Check that the old observer is marked inactive
|
||||
var inactive int
|
||||
if err := store.db.QueryRow("SELECT inactive FROM observers WHERE id = ?", "obs-old").Scan(&inactive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inactive != 1 {
|
||||
t.Errorf("obs-old inactive=%d, want 1", inactive)
|
||||
}
|
||||
|
||||
// Check that the recent observer is still active
|
||||
var newInactive int
|
||||
if err := store.db.QueryRow("SELECT inactive FROM observers WHERE id = ?", "obs-new").Scan(&newInactive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if newInactive != 0 {
|
||||
t.Errorf("obs-new inactive=%d, want 0", newInactive)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStaleObserversNone(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
|
||||
removed, err := store.RemoveStaleObservers(14)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if removed != 0 {
|
||||
t.Errorf("removed=%d, want 0", removed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStaleObserversKeepForever(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
|
||||
// Insert an old observer
|
||||
err := store.UpsertObserver("obs-ancient", "AncientObserver", "LAX", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -365).Format(time.RFC3339)
|
||||
_, err = store.db.Exec("UPDATE observers SET last_seen = ? WHERE id = ?", cutoff, "obs-ancient")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// observerDays = -1 means keep forever
|
||||
removed, err := store.RemoveStaleObservers(-1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if removed != 0 {
|
||||
t.Errorf("removed=%d, want 0 (keep forever)", removed)
|
||||
}
|
||||
|
||||
var count int
|
||||
if err := store.db.QueryRow("SELECT COUNT(*) FROM observers").Scan(&count); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 1 {
|
||||
t.Errorf("observers count=%d, want 1 (keep forever)", count)
|
||||
}
|
||||
|
||||
// Observer should NOT be marked inactive
|
||||
var inactive int
|
||||
if err := store.db.QueryRow("SELECT inactive FROM observers WHERE id = ?", "obs-ancient").Scan(&inactive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inactive != 0 {
|
||||
t.Errorf("obs-ancient inactive=%d, want 0 (keep forever)", inactive)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStaleObserversReactivation(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
|
||||
// Insert and stale-mark an observer
|
||||
err := store.UpsertObserver("obs-test", "TestObserver", "LAX", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -30).Format(time.RFC3339)
|
||||
_, err = store.db.Exec("UPDATE observers SET last_seen = ? WHERE id = ?", cutoff, "obs-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
removed, err := store.RemoveStaleObservers(14)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if removed != 1 {
|
||||
t.Errorf("removed=%d, want 1", removed)
|
||||
}
|
||||
|
||||
// Verify it's inactive
|
||||
var inactive int
|
||||
if err := store.db.QueryRow("SELECT inactive FROM observers WHERE id = ?", "obs-test").Scan(&inactive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inactive != 1 {
|
||||
t.Errorf("inactive=%d, want 1 after soft-delete", inactive)
|
||||
}
|
||||
|
||||
// Now UpsertObserver should reactivate it
|
||||
err = store.UpsertObserver("obs-test", "TestObserver", "LAX", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.db.QueryRow("SELECT inactive FROM observers WHERE id = ?", "obs-test").Scan(&inactive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inactive != 0 {
|
||||
t.Errorf("inactive=%d, want 0 after reactivation", inactive)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObserverDaysOrDefault(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
want int
|
||||
}{
|
||||
{"nil retention", &Config{}, 14},
|
||||
{"zero observer days", &Config{Retention: &RetentionConfig{ObserverDays: 0}}, 14},
|
||||
{"positive value", &Config{Retention: &RetentionConfig{ObserverDays: 30}}, 30},
|
||||
{"keep forever", &Config{Retention: &RetentionConfig{ObserverDays: -1}}, -1},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.cfg.ObserverDaysOrDefault()
|
||||
if got != tt.want {
|
||||
t.Errorf("ObserverDaysOrDefault() = %d, want %d", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
+17
-250
@@ -11,7 +11,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
@@ -23,7 +22,6 @@ type DBStats struct {
|
||||
NodeUpserts atomic.Int64
|
||||
ObserverUpserts atomic.Int64
|
||||
WriteErrors atomic.Int64
|
||||
SignatureDrops atomic.Int64
|
||||
}
|
||||
|
||||
// Store wraps the SQLite database for packet ingestion.
|
||||
@@ -59,7 +57,7 @@ func OpenStoreWithInterval(dbPath string, sampleIntervalSec int) (*Store, error)
|
||||
return nil, fmt.Errorf("creating data dir: %w", err)
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite", dbPath+"?_pragma=auto_vacuum(INCREMENTAL)&_pragma=journal_mode(WAL)&_pragma=foreign_keys(ON)&_pragma=busy_timeout(5000)")
|
||||
db, err := sql.Open("sqlite", dbPath+"?_pragma=journal_mode(WAL)&_pragma=foreign_keys(ON)&_pragma=busy_timeout(5000)")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening db: %w", err)
|
||||
}
|
||||
@@ -85,9 +83,6 @@ func OpenStoreWithInterval(dbPath string, sampleIntervalSec int) (*Store, error)
|
||||
}
|
||||
|
||||
func applySchema(db *sql.DB) error {
|
||||
// auto_vacuum=INCREMENTAL is set via DSN pragma (must be before journal_mode).
|
||||
// Logging of current mode is handled by CheckAutoVacuum — no duplicate log here.
|
||||
|
||||
schema := `
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
@@ -115,8 +110,7 @@ func applySchema(db *sql.DB) error {
|
||||
radio TEXT,
|
||||
battery_mv INTEGER,
|
||||
uptime_secs INTEGER,
|
||||
noise_floor REAL,
|
||||
inactive INTEGER DEFAULT 0
|
||||
noise_floor REAL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_last_seen ON nodes(last_seen);
|
||||
@@ -193,7 +187,7 @@ func applySchema(db *sql.DB) error {
|
||||
db.Exec(`DROP VIEW IF EXISTS packets_v`)
|
||||
_, vErr := db.Exec(`
|
||||
CREATE VIEW packets_v AS
|
||||
SELECT o.id, COALESCE(o.raw_hex, t.raw_hex) AS raw_hex,
|
||||
SELECT o.id, t.raw_hex,
|
||||
datetime(o.timestamp, 'unixepoch') AS timestamp,
|
||||
obs.id AS observer_id, obs.name AS observer_name,
|
||||
o.direction, o.snr, o.rssi, o.score, t.hash, t.route_type,
|
||||
@@ -201,7 +195,7 @@ func applySchema(db *sql.DB) error {
|
||||
t.created_at
|
||||
FROM observations o
|
||||
JOIN transmissions t ON t.id = o.transmission_id
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx AND (obs.inactive IS NULL OR obs.inactive = 0)
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
|
||||
`)
|
||||
if vErr != nil {
|
||||
return fmt.Errorf("packets_v view: %w", vErr)
|
||||
@@ -341,19 +335,6 @@ func applySchema(db *sql.DB) error {
|
||||
log.Println("[migration] observer_metrics timestamp index created")
|
||||
}
|
||||
|
||||
// Migration: add inactive column to observers for soft-delete retention
|
||||
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observers_inactive_v1'")
|
||||
if row.Scan(&migDone) != nil {
|
||||
log.Println("[migration] Adding inactive column to observers...")
|
||||
_, err := db.Exec(`ALTER TABLE observers ADD COLUMN inactive INTEGER DEFAULT 0`)
|
||||
if err != nil {
|
||||
// Column may already exist (e.g. fresh install with schema above)
|
||||
log.Printf("[migration] observers.inactive: %v (may already exist)", err)
|
||||
}
|
||||
db.Exec(`INSERT INTO _migrations (name) VALUES ('observers_inactive_v1')`)
|
||||
log.Println("[migration] observers.inactive column added")
|
||||
}
|
||||
|
||||
// Migration: add packets_sent and packets_recv columns to observer_metrics
|
||||
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observer_metrics_packets_v1'")
|
||||
if row.Scan(&migDone) != nil {
|
||||
@@ -364,63 +345,6 @@ func applySchema(db *sql.DB) error {
|
||||
log.Println("[migration] packets_sent/packets_recv columns added")
|
||||
}
|
||||
|
||||
// Migration: add channel_hash column for fast channel queries (#762)
|
||||
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'channel_hash_v1'")
|
||||
if row.Scan(&migDone) != nil {
|
||||
log.Println("[migration] Adding channel_hash column to transmissions...")
|
||||
db.Exec(`ALTER TABLE transmissions ADD COLUMN channel_hash TEXT DEFAULT NULL`)
|
||||
db.Exec(`CREATE INDEX IF NOT EXISTS idx_tx_channel_hash ON transmissions(channel_hash) WHERE payload_type = 5`)
|
||||
// Backfill: extract channel name for decrypted (CHAN) packets
|
||||
res, err := db.Exec(`UPDATE transmissions SET channel_hash = json_extract(decoded_json, '$.channel') WHERE payload_type = 5 AND channel_hash IS NULL AND json_extract(decoded_json, '$.type') = 'CHAN'`)
|
||||
if err == nil {
|
||||
n, _ := res.RowsAffected()
|
||||
log.Printf("[migration] Backfilled channel_hash for %d CHAN packets", n)
|
||||
}
|
||||
// Backfill: extract channelHashHex for encrypted (GRP_TXT) packets, prefixed with 'enc_'
|
||||
res, err = db.Exec(`UPDATE transmissions SET channel_hash = 'enc_' || json_extract(decoded_json, '$.channelHashHex') WHERE payload_type = 5 AND channel_hash IS NULL AND json_extract(decoded_json, '$.type') = 'GRP_TXT'`)
|
||||
if err == nil {
|
||||
n, _ := res.RowsAffected()
|
||||
log.Printf("[migration] Backfilled channel_hash for %d GRP_TXT packets", n)
|
||||
}
|
||||
db.Exec(`INSERT INTO _migrations (name) VALUES ('channel_hash_v1')`)
|
||||
log.Println("[migration] channel_hash column added and backfilled")
|
||||
}
|
||||
|
||||
// Migration: dropped_packets table for signature validation failures (#793)
|
||||
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'dropped_packets_v1'")
|
||||
if row.Scan(&migDone) != nil {
|
||||
log.Println("[migration] Creating dropped_packets table...")
|
||||
_, err := db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS dropped_packets (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
hash TEXT,
|
||||
raw_hex TEXT,
|
||||
reason TEXT NOT NULL,
|
||||
observer_id TEXT,
|
||||
observer_name TEXT,
|
||||
node_pubkey TEXT,
|
||||
node_name TEXT,
|
||||
dropped_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_dropped_observer ON dropped_packets(observer_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_dropped_node ON dropped_packets(node_pubkey);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("dropped_packets schema: %w", err)
|
||||
}
|
||||
db.Exec(`INSERT INTO _migrations (name) VALUES ('dropped_packets_v1')`)
|
||||
log.Println("[migration] dropped_packets table created")
|
||||
}
|
||||
|
||||
// Migration: add raw_hex column to observations (#881)
|
||||
row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observations_raw_hex_v1'")
|
||||
if row.Scan(&migDone) != nil {
|
||||
log.Println("[migration] Adding raw_hex column to observations...")
|
||||
db.Exec(`ALTER TABLE observations ADD COLUMN raw_hex TEXT`)
|
||||
db.Exec(`INSERT INTO _migrations (name) VALUES ('observations_raw_hex_v1')`)
|
||||
log.Println("[migration] observations.raw_hex column added")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -433,8 +357,8 @@ func (s *Store) prepareStatements() error {
|
||||
}
|
||||
|
||||
s.stmtInsertTransmission, err = s.db.Prepare(`
|
||||
INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json, channel_hash)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -446,13 +370,8 @@ func (s *Store) prepareStatements() error {
|
||||
}
|
||||
|
||||
s.stmtInsertObservation, err = s.db.Prepare(`
|
||||
INSERT INTO observations (transmission_id, observer_idx, direction, snr, rssi, score, path_json, timestamp, raw_hex)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(transmission_id, observer_idx, COALESCE(path_json, '')) DO UPDATE SET
|
||||
snr = COALESCE(excluded.snr, snr),
|
||||
rssi = COALESCE(excluded.rssi, rssi),
|
||||
score = COALESCE(excluded.score, score),
|
||||
raw_hex = COALESCE(excluded.raw_hex, raw_hex)
|
||||
INSERT OR IGNORE INTO observations (transmission_id, observer_idx, direction, snr, rssi, score, path_json, timestamp)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -562,7 +481,7 @@ func (s *Store) InsertTransmission(data *PacketData) (bool, error) {
|
||||
result, err := s.stmtInsertTransmission.Exec(
|
||||
data.RawHex, hash, now,
|
||||
data.RouteType, data.PayloadType, data.PayloadVersion,
|
||||
data.DecodedJSON, nilIfEmpty(data.ChannelHash),
|
||||
data.DecodedJSON,
|
||||
)
|
||||
if err != nil {
|
||||
s.Stats.WriteErrors.Add(1)
|
||||
@@ -598,7 +517,7 @@ func (s *Store) InsertTransmission(data *PacketData) (bool, error) {
|
||||
_, err = s.stmtInsertObservation.Exec(
|
||||
txID, observerIdx, data.Direction,
|
||||
data.SNR, data.RSSI, data.Score,
|
||||
data.PathJSON, epochTs, nilIfEmpty(data.RawHex),
|
||||
data.PathJSON, epochTs,
|
||||
)
|
||||
if err != nil {
|
||||
s.Stats.WriteErrors.Add(1)
|
||||
@@ -703,13 +622,10 @@ func (s *Store) UpsertObserver(id, name, iata string, meta *ObserverMeta) error
|
||||
)
|
||||
if err != nil {
|
||||
s.Stats.WriteErrors.Add(1)
|
||||
return err
|
||||
} else {
|
||||
s.Stats.ObserverUpserts.Add(1)
|
||||
}
|
||||
s.Stats.ObserverUpserts.Add(1)
|
||||
|
||||
// Reactivate if this observer was previously marked inactive
|
||||
s.db.Exec(`UPDATE observers SET inactive = 0 WHERE id = ? AND inactive = 1`, id)
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Close checkpoints the WAL and closes the database.
|
||||
@@ -791,58 +707,6 @@ func (s *Store) PruneOldMetrics(retentionDays int) (int64, error) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// CheckAutoVacuum inspects the current auto_vacuum mode and logs a warning
|
||||
// if not INCREMENTAL. Performs opt-in full VACUUM if db.vacuumOnStartup is set (#919).
|
||||
func (s *Store) CheckAutoVacuum(cfg *Config) {
|
||||
var autoVacuum int
|
||||
if err := s.db.QueryRow("PRAGMA auto_vacuum").Scan(&autoVacuum); err != nil {
|
||||
log.Printf("[db] warning: could not read auto_vacuum: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if autoVacuum == 2 {
|
||||
log.Printf("[db] auto_vacuum=INCREMENTAL")
|
||||
return
|
||||
}
|
||||
|
||||
modes := map[int]string{0: "NONE", 1: "FULL", 2: "INCREMENTAL"}
|
||||
mode := modes[autoVacuum]
|
||||
if mode == "" {
|
||||
mode = fmt.Sprintf("UNKNOWN(%d)", autoVacuum)
|
||||
}
|
||||
|
||||
log.Printf("[db] auto_vacuum=%s — DB needs one-time VACUUM to enable incremental auto-vacuum. "+
|
||||
"Set db.vacuumOnStartup: true in config to migrate (will block startup for several minutes on large DBs). "+
|
||||
"See https://github.com/Kpa-clawbot/CoreScope/issues/919", mode)
|
||||
|
||||
if cfg.DB != nil && cfg.DB.VacuumOnStartup {
|
||||
// WARNING: Full VACUUM creates a temporary copy of the entire DB file.
|
||||
// Requires ~2× the DB file size in free disk space or it will fail.
|
||||
log.Printf("[db] vacuumOnStartup=true — starting one-time full VACUUM (ensure 2x DB size free disk space)...")
|
||||
start := time.Now()
|
||||
|
||||
if _, err := s.db.Exec("PRAGMA auto_vacuum = INCREMENTAL"); err != nil {
|
||||
log.Printf("[db] VACUUM failed: could not set auto_vacuum: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err := s.db.Exec("VACUUM"); err != nil {
|
||||
log.Printf("[db] VACUUM failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("[db] VACUUM complete in %v — auto_vacuum is now INCREMENTAL", elapsed.Round(time.Millisecond))
|
||||
}
|
||||
}
|
||||
|
||||
// RunIncrementalVacuum returns free pages to the OS (#919).
|
||||
// Safe to call on auto_vacuum=NONE databases (noop).
|
||||
func (s *Store) RunIncrementalVacuum(pages int) {
|
||||
if _, err := s.db.Exec(fmt.Sprintf("PRAGMA incremental_vacuum(%d)", pages)); err != nil {
|
||||
log.Printf("[vacuum] incremental_vacuum error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Checkpoint forces a WAL checkpoint to release the WAL lock file,
|
||||
// preventing lock contention with a new process starting up.
|
||||
func (s *Store) Checkpoint() {
|
||||
@@ -855,14 +719,13 @@ func (s *Store) Checkpoint() {
|
||||
|
||||
// LogStats logs current operational metrics.
|
||||
func (s *Store) LogStats() {
|
||||
log.Printf("[stats] tx_inserted=%d tx_dupes=%d obs_inserted=%d node_upserts=%d observer_upserts=%d write_errors=%d sig_drops=%d",
|
||||
log.Printf("[stats] tx_inserted=%d tx_dupes=%d obs_inserted=%d node_upserts=%d observer_upserts=%d write_errors=%d",
|
||||
s.Stats.TransmissionsInserted.Load(),
|
||||
s.Stats.DuplicateTransmissions.Load(),
|
||||
s.Stats.ObservationsInserted.Load(),
|
||||
s.Stats.NodeUpserts.Load(),
|
||||
s.Stats.ObserverUpserts.Load(),
|
||||
s.Stats.WriteErrors.Load(),
|
||||
s.Stats.SignatureDrops.Load(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -894,71 +757,6 @@ func (s *Store) MoveStaleNodes(nodeDays int) (int64, error) {
|
||||
return moved, nil
|
||||
}
|
||||
|
||||
// RemoveStaleObservers marks observers that have not actively sent data in observerDays
|
||||
// as inactive (soft-delete). This preserves JOIN integrity for observations.observer_idx
|
||||
// and observer_metrics.observer_id — historical data still references the correct observer.
|
||||
// An observer must actively send data to stay listed — being seen by another node does not count.
|
||||
// observerDays <= -1 means never remove (keep forever).
|
||||
func (s *Store) RemoveStaleObservers(observerDays int) (int64, error) {
|
||||
if observerDays <= -1 {
|
||||
return 0, nil // keep forever
|
||||
}
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -observerDays).Format(time.RFC3339)
|
||||
result, err := s.db.Exec(`UPDATE observers SET inactive = 1 WHERE last_seen < ? AND (inactive IS NULL OR inactive = 0)`, cutoff)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("mark stale observers inactive: %w", err)
|
||||
}
|
||||
removed, _ := result.RowsAffected()
|
||||
if removed > 0 {
|
||||
// Clean up orphaned metrics for now-inactive observers
|
||||
s.db.Exec(`DELETE FROM observer_metrics WHERE observer_id IN (SELECT id FROM observers WHERE inactive = 1)`)
|
||||
log.Printf("Marked %d observer(s) as inactive (not seen in %d days)", removed, observerDays)
|
||||
}
|
||||
return removed, nil
|
||||
}
|
||||
|
||||
// DroppedPacket holds data for a packet rejected during ingest.
|
||||
type DroppedPacket struct {
|
||||
Hash string
|
||||
RawHex string
|
||||
Reason string
|
||||
ObserverID string
|
||||
ObserverName string
|
||||
NodePubKey string
|
||||
NodeName string
|
||||
}
|
||||
|
||||
// InsertDroppedPacket records a rejected packet in the dropped_packets table.
|
||||
func (s *Store) InsertDroppedPacket(dp *DroppedPacket) error {
|
||||
_, err := s.db.Exec(
|
||||
`INSERT INTO dropped_packets (hash, raw_hex, reason, observer_id, observer_name, node_pubkey, node_name) VALUES (?, ?, ?, ?, ?, ?, ?)`,
|
||||
dp.Hash, dp.RawHex, dp.Reason, dp.ObserverID, dp.ObserverName, dp.NodePubKey, dp.NodeName,
|
||||
)
|
||||
if err != nil {
|
||||
s.Stats.WriteErrors.Add(1)
|
||||
return fmt.Errorf("insert dropped packet: %w", err)
|
||||
}
|
||||
s.Stats.SignatureDrops.Add(1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PruneDroppedPackets removes dropped_packets older than retentionDays.
|
||||
func (s *Store) PruneDroppedPackets(retentionDays int) (int64, error) {
|
||||
if retentionDays <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format(time.RFC3339)
|
||||
result, err := s.db.Exec(`DELETE FROM dropped_packets WHERE dropped_at < ?`, cutoff)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("prune dropped packets: %w", err)
|
||||
}
|
||||
n, _ := result.RowsAffected()
|
||||
if n > 0 {
|
||||
log.Printf("Pruned %d dropped packet(s) older than %d days", n, retentionDays)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// PacketData holds the data needed to insert a packet into the DB.
|
||||
type PacketData struct {
|
||||
RawHex string
|
||||
@@ -975,15 +773,6 @@ type PacketData struct {
|
||||
PayloadVersion int
|
||||
PathJSON string
|
||||
DecodedJSON string
|
||||
ChannelHash string // grouping key for channel queries (#762)
|
||||
}
|
||||
|
||||
// nilIfEmpty returns nil for empty strings (for nullable DB columns).
|
||||
func nilIfEmpty(s string) interface{} {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// MQTTPacketMessage is the JSON payload from an MQTT raw packet message.
|
||||
@@ -997,26 +786,15 @@ type MQTTPacketMessage struct {
|
||||
}
|
||||
|
||||
// BuildPacketData constructs a PacketData from a decoded packet and MQTT message.
|
||||
// path_json is derived directly from raw_hex header bytes (not decoded.Path.Hops)
|
||||
// to guarantee the stored path always matches the raw bytes. This matters for
|
||||
// TRACE packets where decoded.Path.Hops is overwritten with payload hops (#886).
|
||||
func BuildPacketData(msg *MQTTPacketMessage, decoded *DecodedPacket, observerID, region string) *PacketData {
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
pathJSON := "[]"
|
||||
// For TRACE packets, path_json must be the payload-decoded route hops
|
||||
// (decoded.Path.Hops), NOT the raw_hex header bytes which are SNR values.
|
||||
// For all other packet types, derive path from raw_hex (#886).
|
||||
if !packetpath.PathBytesAreHops(byte(decoded.Header.PayloadType)) {
|
||||
if len(decoded.Path.Hops) > 0 {
|
||||
b, _ := json.Marshal(decoded.Path.Hops)
|
||||
pathJSON = string(b)
|
||||
}
|
||||
} else if hops, err := packetpath.DecodePathFromRawHex(msg.Raw); err == nil && len(hops) > 0 {
|
||||
b, _ := json.Marshal(hops)
|
||||
if len(decoded.Path.Hops) > 0 {
|
||||
b, _ := json.Marshal(decoded.Path.Hops)
|
||||
pathJSON = string(b)
|
||||
}
|
||||
|
||||
pd := &PacketData{
|
||||
return &PacketData{
|
||||
RawHex: msg.Raw,
|
||||
Timestamp: now,
|
||||
ObserverID: observerID,
|
||||
@@ -1032,15 +810,4 @@ func BuildPacketData(msg *MQTTPacketMessage, decoded *DecodedPacket, observerID,
|
||||
PathJSON: pathJSON,
|
||||
DecodedJSON: PayloadJSON(&decoded.Payload),
|
||||
}
|
||||
|
||||
// Populate channel_hash for fast channel queries (#762)
|
||||
if decoded.Header.PayloadType == PayloadGRP_TXT {
|
||||
if decoded.Payload.Type == "CHAN" && decoded.Payload.Channel != "" {
|
||||
pd.ChannelHash = decoded.Payload.Channel
|
||||
} else if decoded.Payload.Type == "GRP_TXT" && decoded.Payload.ChannelHashHex != "" {
|
||||
pd.ChannelHash = "enc_" + decoded.Payload.ChannelHashHex
|
||||
}
|
||||
}
|
||||
|
||||
return pd
|
||||
}
|
||||
|
||||
+6
-247
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -11,8 +10,6 @@ import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
)
|
||||
|
||||
func tempDBPath(t *testing.T) string {
|
||||
@@ -579,7 +576,7 @@ func TestEndToEndIngest(t *testing.T) {
|
||||
// Simulate full pipeline: decode + insert
|
||||
rawHex := "120046D62DE27D4C5194D7821FC5A34A45565DCC2537B300B9AB6275255CEFB65D840CE5C169C94C9AED39E8BCB6CB6EB0335497A198B33A1A610CD3B03D8DCFC160900E5244280323EE0B44CACAB8F02B5B38B91CFA18BD067B0B5E63E94CFC85F758A8530B9240933402E0E6B8F84D5252322D52"
|
||||
|
||||
decoded, err := DecodePacket(rawHex, nil, false)
|
||||
decoded, err := DecodePacket(rawHex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -767,7 +764,7 @@ func TestInsertTransmissionNilSNRRSSI(t *testing.T) {
|
||||
|
||||
func TestBuildPacketData(t *testing.T) {
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
decoded, err := DecodePacket(rawHex, nil, false)
|
||||
decoded, err := DecodePacket(rawHex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -821,7 +818,7 @@ func TestBuildPacketData(t *testing.T) {
|
||||
func TestBuildPacketDataWithHops(t *testing.T) {
|
||||
// A packet with actual hops in the path
|
||||
raw := "0505AABBCCDDEE" + strings.Repeat("00", 10)
|
||||
decoded, err := DecodePacket(raw, nil, false)
|
||||
decoded, err := DecodePacket(raw, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -837,7 +834,7 @@ func TestBuildPacketDataWithHops(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBuildPacketDataNilSNRRSSI(t *testing.T) {
|
||||
decoded, _ := DecodePacket("0A00"+strings.Repeat("00", 10), nil, false)
|
||||
decoded, _ := DecodePacket("0A00"+strings.Repeat("00", 10), nil)
|
||||
msg := &MQTTPacketMessage{Raw: "0A00" + strings.Repeat("00", 10)}
|
||||
pkt := BuildPacketData(msg, decoded, "", "")
|
||||
|
||||
@@ -1627,7 +1624,7 @@ func TestObsTimestampIndexMigration(t *testing.T) {
|
||||
|
||||
func TestBuildPacketDataScoreAndDirection(t *testing.T) {
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
decoded, err := DecodePacket(rawHex, nil, false)
|
||||
decoded, err := DecodePacket(rawHex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1650,7 +1647,7 @@ func TestBuildPacketDataScoreAndDirection(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBuildPacketDataNilScoreDirection(t *testing.T) {
|
||||
decoded, _ := DecodePacket("0A00"+strings.Repeat("00", 10), nil, false)
|
||||
decoded, _ := DecodePacket("0A00"+strings.Repeat("00", 10), nil)
|
||||
msg := &MQTTPacketMessage{Raw: "0A00" + strings.Repeat("00", 10)}
|
||||
pkt := BuildPacketData(msg, decoded, "", "")
|
||||
|
||||
@@ -1885,241 +1882,3 @@ func TestExtractObserverMetaNewFields(t *testing.T) {
|
||||
t.Errorf("RecvErrors = %v, want 3", meta.RecvErrors)
|
||||
}
|
||||
}
|
||||
|
||||
// TestInsertObservationSNRFillIn verifies that when the same observation is
|
||||
// received twice — first without SNR, then with SNR — the SNR is filled in
|
||||
// rather than silently discarded. The unique dedup index is
|
||||
// (transmission_id, observer_idx, COALESCE(path_json, '')); observer_idx must
|
||||
// be non-NULL for the conflict to fire (SQLite treats NULL != NULL).
|
||||
func TestInsertObservationSNRFillIn(t *testing.T) {
|
||||
s, err := OpenStore(tempDBPath(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
// Register the observer so observer_idx is non-NULL (required for dedup).
|
||||
if err := s.UpsertObserver("pymc-obs1", "PyMC Observer", "SJC", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// First arrival: same observer, no SNR/RSSI (e.g. broker replay without RF fields).
|
||||
data1 := &PacketData{
|
||||
RawHex: "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976",
|
||||
Timestamp: "2026-04-20T00:00:00Z",
|
||||
Hash: "snrfillin0001hash",
|
||||
RouteType: 1,
|
||||
ObserverID: "pymc-obs1",
|
||||
SNR: nil,
|
||||
RSSI: nil,
|
||||
}
|
||||
if _, err := s.InsertTransmission(data1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var snr1, rssi1 *float64
|
||||
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr1, &rssi1)
|
||||
if snr1 != nil || rssi1 != nil {
|
||||
t.Fatalf("precondition: first insert should have nil SNR/RSSI, got snr=%v rssi=%v", snr1, rssi1)
|
||||
}
|
||||
|
||||
// Second arrival: same packet, same observer, now WITH SNR/RSSI.
|
||||
snr := 10.5
|
||||
rssi := -88.0
|
||||
data2 := &PacketData{
|
||||
RawHex: data1.RawHex,
|
||||
Timestamp: data1.Timestamp,
|
||||
Hash: data1.Hash,
|
||||
RouteType: data1.RouteType,
|
||||
ObserverID: "pymc-obs1",
|
||||
SNR: &snr,
|
||||
RSSI: &rssi,
|
||||
}
|
||||
if _, err := s.InsertTransmission(data2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var snr2, rssi2 *float64
|
||||
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr2, &rssi2)
|
||||
if snr2 == nil || *snr2 != snr {
|
||||
t.Errorf("SNR not filled in by second arrival: got %v, want %v", snr2, snr)
|
||||
}
|
||||
if rssi2 == nil || *rssi2 != rssi {
|
||||
t.Errorf("RSSI not filled in by second arrival: got %v, want %v", rssi2, rssi)
|
||||
}
|
||||
|
||||
// Third arrival: same packet again, SNR absent — must NOT overwrite existing SNR.
|
||||
data3 := &PacketData{
|
||||
RawHex: data1.RawHex,
|
||||
Timestamp: data1.Timestamp,
|
||||
Hash: data1.Hash,
|
||||
RouteType: data1.RouteType,
|
||||
ObserverID: "pymc-obs1",
|
||||
SNR: nil,
|
||||
RSSI: nil,
|
||||
}
|
||||
if _, err := s.InsertTransmission(data3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var snr3, rssi3 *float64
|
||||
s.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr3, &rssi3)
|
||||
if snr3 == nil || *snr3 != snr {
|
||||
t.Errorf("SNR overwritten by null arrival: got %v, want %v", snr3, snr)
|
||||
}
|
||||
if rssi3 == nil || *rssi3 != rssi {
|
||||
t.Errorf("RSSI overwritten by null arrival: got %v, want %v", rssi3, rssi)
|
||||
}
|
||||
}
|
||||
|
||||
// TestPerObservationRawHex verifies that two MQTT packets for the same hash
|
||||
// from different observers store distinct raw_hex per observation (#881).
|
||||
func TestPerObservationRawHex(t *testing.T) {
|
||||
store, err := OpenStore(tempDBPath(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
// Register two observers
|
||||
store.UpsertObserver("obs-A", "Observer A", "", nil)
|
||||
store.UpsertObserver("obs-B", "Observer B", "", nil)
|
||||
|
||||
hash := "abc123def456"
|
||||
rawA := "c0ffee01"
|
||||
rawB := "c0ffee0201aa"
|
||||
dir := "RX"
|
||||
|
||||
// First observation from observer A
|
||||
pdA := &PacketData{
|
||||
RawHex: rawA,
|
||||
Hash: hash,
|
||||
Timestamp: "2026-04-21T10:00:00Z",
|
||||
ObserverID: "obs-A",
|
||||
Direction: &dir,
|
||||
PathJSON: "[]",
|
||||
}
|
||||
isNew, err := store.InsertTransmission(pdA)
|
||||
if err != nil {
|
||||
t.Fatalf("insert A: %v", err)
|
||||
}
|
||||
if !isNew {
|
||||
t.Fatal("expected new transmission")
|
||||
}
|
||||
|
||||
// Second observation from observer B (same hash, different raw bytes)
|
||||
pdB := &PacketData{
|
||||
RawHex: rawB,
|
||||
Hash: hash,
|
||||
Timestamp: "2026-04-21T10:00:01Z",
|
||||
ObserverID: "obs-B",
|
||||
Direction: &dir,
|
||||
PathJSON: `["aabb"]`,
|
||||
}
|
||||
isNew2, err := store.InsertTransmission(pdB)
|
||||
if err != nil {
|
||||
t.Fatalf("insert B: %v", err)
|
||||
}
|
||||
if isNew2 {
|
||||
t.Fatal("expected duplicate transmission")
|
||||
}
|
||||
|
||||
// Query observations and verify per-observation raw_hex
|
||||
rows, err := store.db.Query(`
|
||||
SELECT o.raw_hex, obs.id
|
||||
FROM observations o
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
|
||||
ORDER BY o.id ASC
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatalf("query: %v", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
type obsResult struct {
|
||||
rawHex string
|
||||
observerID string
|
||||
}
|
||||
var results []obsResult
|
||||
for rows.Next() {
|
||||
var rh, oid sql.NullString
|
||||
if err := rows.Scan(&rh, &oid); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
results = append(results, obsResult{
|
||||
rawHex: rh.String,
|
||||
observerID: oid.String,
|
||||
})
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 observations, got %d", len(results))
|
||||
}
|
||||
if results[0].rawHex != rawA {
|
||||
t.Errorf("obs A raw_hex: got %q, want %q", results[0].rawHex, rawA)
|
||||
}
|
||||
if results[1].rawHex != rawB {
|
||||
t.Errorf("obs B raw_hex: got %q, want %q", results[1].rawHex, rawB)
|
||||
}
|
||||
if results[0].rawHex == results[1].rawHex {
|
||||
t.Error("both observations have same raw_hex — should differ")
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildPacketData_TraceUsesPayloadHops verifies that TRACE packets use
|
||||
// payload-decoded route hops in path_json (NOT the raw_hex header SNR bytes).
|
||||
// Issue #886 / #887.
|
||||
func TestBuildPacketData_TraceUsesPayloadHops(t *testing.T) {
|
||||
// TRACE packet: header path has SNR bytes [30,2D,0D,23], but decoded.Path.Hops
|
||||
// is overwritten to payload hops [67,33,D6,33,67].
|
||||
rawHex := "2604302D0D2359FEE7B100000000006733D63367"
|
||||
decoded, err := DecodePacket(rawHex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// decoded.Path.Hops should be the TRACE-replaced hops (payload hops)
|
||||
if len(decoded.Path.Hops) != 5 {
|
||||
t.Fatalf("expected 5 decoded hops, got %d", len(decoded.Path.Hops))
|
||||
}
|
||||
|
||||
msg := &MQTTPacketMessage{Raw: rawHex}
|
||||
pd := BuildPacketData(msg, decoded, "test-obs", "TST")
|
||||
|
||||
// For TRACE: path_json MUST be the payload-decoded route hops, NOT the SNR bytes
|
||||
expectedPathJSON := `["67","33","D6","33","67"]`
|
||||
if pd.PathJSON != expectedPathJSON {
|
||||
t.Errorf("path_json = %s, want %s (TRACE must use payload hops)", pd.PathJSON, expectedPathJSON)
|
||||
}
|
||||
|
||||
// Verify that DecodePathFromRawHex returns the SNR bytes (header path) which differ
|
||||
headerHops, herr := packetpath.DecodePathFromRawHex(rawHex)
|
||||
if herr != nil {
|
||||
t.Fatal(herr)
|
||||
}
|
||||
headerJSON, _ := json.Marshal(headerHops)
|
||||
if string(headerJSON) == expectedPathJSON {
|
||||
t.Error("header path (SNR) should differ from payload hops for TRACE")
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildPacketData_NonTracePathJSON verifies non-TRACE packets also derive path from raw_hex.
|
||||
func TestBuildPacketData_NonTracePathJSON(t *testing.T) {
|
||||
// A simple ADVERT packet (payload type 0) with 2 hops, hash_size 1
|
||||
// Header 0x09 = FLOOD(1), ADVERT(2), version 0
|
||||
// Path byte 0x02 = hash_size 1, hash_count 2
|
||||
// Path bytes: AA BB
|
||||
rawHex := "0902AABB" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||
decoded, err := DecodePacket(rawHex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msg := &MQTTPacketMessage{Raw: rawHex}
|
||||
pd := BuildPacketData(msg, decoded, "obs1", "TST")
|
||||
|
||||
expectedPathJSON := `["AA","BB"]`
|
||||
if pd.PathJSON != expectedPathJSON {
|
||||
t.Errorf("path_json = %s, want %s", pd.PathJSON, expectedPathJSON)
|
||||
}
|
||||
}
|
||||
|
||||
+18
-76
@@ -11,9 +11,6 @@ import (
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
"github.com/meshcore-analyzer/sigvalidate"
|
||||
)
|
||||
|
||||
// Route type constants (header bits 1-0)
|
||||
@@ -81,10 +78,9 @@ type TransportCodes struct {
|
||||
|
||||
// Path holds decoded path/hop information.
|
||||
type Path struct {
|
||||
HashSize int `json:"hashSize"`
|
||||
HashCount int `json:"hashCount"`
|
||||
Hops []string `json:"hops"`
|
||||
HopsCompleted *int `json:"hopsCompleted,omitempty"`
|
||||
HashSize int `json:"hashSize"`
|
||||
HashCount int `json:"hashCount"`
|
||||
Hops []string `json:"hops"`
|
||||
}
|
||||
|
||||
// AdvertFlags holds decoded advert flag bits.
|
||||
@@ -113,7 +109,6 @@ type Payload struct {
|
||||
Timestamp uint32 `json:"timestamp,omitempty"`
|
||||
TimestampISO string `json:"timestampISO,omitempty"`
|
||||
Signature string `json:"signature,omitempty"`
|
||||
SignatureValid *bool `json:"signatureValid,omitempty"`
|
||||
Flags *AdvertFlags `json:"flags,omitempty"`
|
||||
Lat *float64 `json:"lat,omitempty"`
|
||||
Lon *float64 `json:"lon,omitempty"`
|
||||
@@ -145,7 +140,6 @@ type DecodedPacket struct {
|
||||
Path Path `json:"path"`
|
||||
Payload Payload `json:"payload"`
|
||||
Raw string `json:"raw"`
|
||||
Anomaly string `json:"anomaly,omitempty"`
|
||||
}
|
||||
|
||||
func decodeHeader(b byte) Header {
|
||||
@@ -193,9 +187,8 @@ func decodePath(pathByte byte, buf []byte, offset int) (Path, int) {
|
||||
}, totalBytes
|
||||
}
|
||||
|
||||
// isTransportRoute delegates to packetpath.IsTransportRoute.
|
||||
func isTransportRoute(routeType int) bool {
|
||||
return packetpath.IsTransportRoute(routeType)
|
||||
return routeType == RouteTransportFlood || routeType == RouteTransportDirect
|
||||
}
|
||||
|
||||
func decodeEncryptedPayload(typeName string, buf []byte) Payload {
|
||||
@@ -222,7 +215,7 @@ func decodeAck(buf []byte) Payload {
|
||||
}
|
||||
}
|
||||
|
||||
func decodeAdvert(buf []byte, validateSignatures bool) Payload {
|
||||
func decodeAdvert(buf []byte) Payload {
|
||||
if len(buf) < 100 {
|
||||
return Payload{Type: "ADVERT", Error: "too short for advert", RawHex: hex.EncodeToString(buf)}
|
||||
}
|
||||
@@ -240,16 +233,6 @@ func decodeAdvert(buf []byte, validateSignatures bool) Payload {
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
if validateSignatures {
|
||||
valid, err := sigvalidate.ValidateAdvert(buf[0:32], buf[36:100], timestamp, appdata)
|
||||
if err != nil {
|
||||
f := false
|
||||
p.SignatureValid = &f
|
||||
} else {
|
||||
p.SignatureValid = &valid
|
||||
}
|
||||
}
|
||||
|
||||
if len(appdata) > 0 {
|
||||
flags := appdata[0]
|
||||
advType := int(flags & 0x0F)
|
||||
@@ -523,7 +506,7 @@ func decodeTrace(buf []byte) Payload {
|
||||
return p
|
||||
}
|
||||
|
||||
func decodePayload(payloadType int, buf []byte, channelKeys map[string]string, validateSignatures bool) Payload {
|
||||
func decodePayload(payloadType int, buf []byte, channelKeys map[string]string) Payload {
|
||||
switch payloadType {
|
||||
case PayloadREQ:
|
||||
return decodeEncryptedPayload("REQ", buf)
|
||||
@@ -534,7 +517,7 @@ func decodePayload(payloadType int, buf []byte, channelKeys map[string]string, v
|
||||
case PayloadACK:
|
||||
return decodeAck(buf)
|
||||
case PayloadADVERT:
|
||||
return decodeAdvert(buf, validateSignatures)
|
||||
return decodeAdvert(buf)
|
||||
case PayloadGRP_TXT:
|
||||
return decodeGrpTxt(buf, channelKeys)
|
||||
case PayloadANON_REQ:
|
||||
@@ -549,7 +532,7 @@ func decodePayload(payloadType int, buf []byte, channelKeys map[string]string, v
|
||||
}
|
||||
|
||||
// DecodePacket decodes a hex-encoded MeshCore packet.
|
||||
func DecodePacket(hexString string, channelKeys map[string]string, validateSignatures bool) (*DecodedPacket, error) {
|
||||
func DecodePacket(hexString string, channelKeys map[string]string) (*DecodedPacket, error) {
|
||||
hexString = strings.ReplaceAll(hexString, " ", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\n", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\r", "")
|
||||
@@ -587,65 +570,35 @@ func DecodePacket(hexString string, channelKeys map[string]string, validateSigna
|
||||
offset += bytesConsumed
|
||||
|
||||
payloadBuf := buf[offset:]
|
||||
payload := decodePayload(header.PayloadType, payloadBuf, channelKeys, validateSignatures)
|
||||
payload := decodePayload(header.PayloadType, payloadBuf, channelKeys)
|
||||
|
||||
// TRACE packets store hop IDs in the payload (buf[9:]) rather than the header
|
||||
// path field. Firmware always sends TRACE as DIRECT (route_type 2 or 3);
|
||||
// FLOOD-routed TRACEs are anomalous but handled gracefully (parsed, but
|
||||
// flagged). The TRACE flags byte (payload offset 8) encodes path_sz in
|
||||
// bits 0-1 as a power-of-two exponent: hash_bytes = 1 << path_sz.
|
||||
// NOT the header path byte's hash_size bits. The header path contains SNR
|
||||
// bytes — one per hop that actually forwarded.
|
||||
// We expose hopsCompleted (count of SNR bytes) so consumers can distinguish
|
||||
// how far the trace got vs the full intended route.
|
||||
var anomaly string
|
||||
// path field. The header path byte still encodes hashSize in bits 6-7, which
|
||||
// we use to split the payload path data into individual hop prefixes.
|
||||
if header.PayloadType == PayloadTRACE && payload.PathData != "" {
|
||||
// Flag anomalous routing — firmware only sends TRACE as DIRECT
|
||||
if header.RouteType != RouteDirect && header.RouteType != RouteTransportDirect {
|
||||
anomaly = "TRACE packet with non-DIRECT routing (expected DIRECT or TRANSPORT_DIRECT)"
|
||||
}
|
||||
// The header path hops count represents SNR entries = completed hops
|
||||
hopsCompleted := path.HashCount
|
||||
pathBytes, err := hex.DecodeString(payload.PathData)
|
||||
if err == nil && payload.TraceFlags != nil {
|
||||
// path_sz from flags byte is a power-of-two exponent per firmware:
|
||||
// hash_bytes = 1 << (flags & 0x03)
|
||||
pathSz := 1 << (*payload.TraceFlags & 0x03)
|
||||
hops := make([]string, 0, len(pathBytes)/pathSz)
|
||||
for i := 0; i+pathSz <= len(pathBytes); i += pathSz {
|
||||
hops = append(hops, strings.ToUpper(hex.EncodeToString(pathBytes[i:i+pathSz])))
|
||||
if err == nil && path.HashSize > 0 {
|
||||
hops := make([]string, 0, len(pathBytes)/path.HashSize)
|
||||
for i := 0; i+path.HashSize <= len(pathBytes); i += path.HashSize {
|
||||
hops = append(hops, strings.ToUpper(hex.EncodeToString(pathBytes[i:i+path.HashSize])))
|
||||
}
|
||||
path.Hops = hops
|
||||
path.HashCount = len(hops)
|
||||
path.HashSize = pathSz
|
||||
path.HopsCompleted = &hopsCompleted
|
||||
}
|
||||
}
|
||||
|
||||
// Zero-hop direct packets have hash_count=0 (lower 6 bits of pathByte),
|
||||
// which makes the generic formula yield a bogus hashSize. Reset to 0
|
||||
// (unknown) so API consumers get correct data. We mask with 0x3F to check
|
||||
// only hash_count, matching the JS frontend approach — the upper hash_size
|
||||
// bits are meaningless when there are no hops. Skip TRACE packets — they
|
||||
// use hashSize to parse hops from the payload above.
|
||||
if (header.RouteType == RouteDirect || header.RouteType == RouteTransportDirect) && pathByte&0x3F == 0 && header.PayloadType != PayloadTRACE {
|
||||
path.HashSize = 0
|
||||
}
|
||||
|
||||
return &DecodedPacket{
|
||||
Header: header,
|
||||
TransportCodes: tc,
|
||||
Path: path,
|
||||
Payload: payload,
|
||||
Raw: strings.ToUpper(hexString),
|
||||
Anomaly: anomaly,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ComputeContentHash computes the SHA-256-based content hash (first 16 hex chars).
|
||||
// It hashes the payload-type nibble + payload (skipping path bytes) to produce a
|
||||
// route-independent identifier for the same logical packet. For TRACE packets,
|
||||
// path_len is included in the hash to match firmware behavior.
|
||||
// It hashes the header byte + payload (skipping path bytes) to produce a
|
||||
// path-independent identifier for the same transmission.
|
||||
func ComputeContentHash(rawHex string) string {
|
||||
buf, err := hex.DecodeString(rawHex)
|
||||
if err != nil || len(buf) < 2 {
|
||||
@@ -681,18 +634,7 @@ func ComputeContentHash(rawHex string) string {
|
||||
}
|
||||
|
||||
payload := buf[payloadStart:]
|
||||
|
||||
// Hash payload-type byte only (bits 2-5 of header), not the full header.
|
||||
// Firmware: SHA256(payload_type + [path_len for TRACE] + payload)
|
||||
// Using the full header caused different hashes for the same logical packet
|
||||
// when route type or version bits differed. See issue #786.
|
||||
payloadType := (headerByte >> 2) & 0x0F
|
||||
toHash := []byte{payloadType}
|
||||
if int(payloadType) == PayloadTRACE {
|
||||
// Firmware uses uint16_t path_len (2 bytes, little-endian)
|
||||
toHash = append(toHash, pathByte, 0x00)
|
||||
}
|
||||
toHash = append(toHash, payload...)
|
||||
toHash := append([]byte{headerByte}, payload...)
|
||||
|
||||
h := sha256.Sum256(toHash)
|
||||
return hex.EncodeToString(h[:])[:16]
|
||||
|
||||
+37
-421
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/ed25519"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
@@ -10,9 +9,6 @@ import (
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
"github.com/meshcore-analyzer/sigvalidate"
|
||||
)
|
||||
|
||||
func TestDecodeHeaderRoutTypes(t *testing.T) {
|
||||
@@ -59,7 +55,7 @@ func TestDecodeHeaderPayloadTypes(t *testing.T) {
|
||||
|
||||
func TestDecodePathZeroHops(t *testing.T) {
|
||||
// 0x00: 0 hops, 1-byte hashes
|
||||
pkt, err := DecodePacket("0500"+strings.Repeat("00", 10), nil, false)
|
||||
pkt, err := DecodePacket("0500"+strings.Repeat("00", 10), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -76,7 +72,7 @@ func TestDecodePathZeroHops(t *testing.T) {
|
||||
|
||||
func TestDecodePath1ByteHashes(t *testing.T) {
|
||||
// 0x05: 5 hops, 1-byte hashes → 5 path bytes
|
||||
pkt, err := DecodePacket("0505"+"AABBCCDDEE"+strings.Repeat("00", 10), nil, false)
|
||||
pkt, err := DecodePacket("0505"+"AABBCCDDEE"+strings.Repeat("00", 10), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -99,7 +95,7 @@ func TestDecodePath1ByteHashes(t *testing.T) {
|
||||
|
||||
func TestDecodePath2ByteHashes(t *testing.T) {
|
||||
// 0x45: 5 hops, 2-byte hashes
|
||||
pkt, err := DecodePacket("0545"+"AA11BB22CC33DD44EE55"+strings.Repeat("00", 10), nil, false)
|
||||
pkt, err := DecodePacket("0545"+"AA11BB22CC33DD44EE55"+strings.Repeat("00", 10), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -116,7 +112,7 @@ func TestDecodePath2ByteHashes(t *testing.T) {
|
||||
|
||||
func TestDecodePath3ByteHashes(t *testing.T) {
|
||||
// 0x8A: 10 hops, 3-byte hashes
|
||||
pkt, err := DecodePacket("058A"+strings.Repeat("AA11FF", 10)+strings.Repeat("00", 10), nil, false)
|
||||
pkt, err := DecodePacket("058A"+strings.Repeat("AA11FF", 10)+strings.Repeat("00", 10), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -135,7 +131,7 @@ func TestTransportCodes(t *testing.T) {
|
||||
// Route type 0 (TRANSPORT_FLOOD) should have transport codes
|
||||
// Firmware order: header + transport_codes(4) + path_len + path + payload
|
||||
hex := "14" + "AABB" + "CCDD" + "00" + strings.Repeat("00", 10)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
pkt, err := DecodePacket(hex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -153,7 +149,7 @@ func TestTransportCodes(t *testing.T) {
|
||||
}
|
||||
|
||||
// Route type 1 (FLOOD) should NOT have transport codes
|
||||
pkt2, err := DecodePacket("0500"+strings.Repeat("00", 10), nil, false)
|
||||
pkt2, err := DecodePacket("0500"+strings.Repeat("00", 10), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -173,7 +169,7 @@ func TestDecodeAdvertFull(t *testing.T) {
|
||||
name := "546573744E6F6465" // "TestNode"
|
||||
|
||||
hex := "1200" + pubkey + timestamp + signature + flags + lat + lon + name
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
pkt, err := DecodePacket(hex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -231,7 +227,7 @@ func TestDecodeAdvertTypeEnums(t *testing.T) {
|
||||
makeAdvert := func(flagsByte byte) *DecodedPacket {
|
||||
hex := "1200" + strings.Repeat("AA", 32) + "00000000" + strings.Repeat("BB", 64) +
|
||||
strings.ToUpper(string([]byte{hexDigit(flagsByte>>4), hexDigit(flagsByte & 0x0f)}))
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
pkt, err := DecodePacket(hex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -276,7 +272,7 @@ func hexDigit(v byte) byte {
|
||||
|
||||
func TestDecodeAdvertNoLocationNoName(t *testing.T) {
|
||||
hex := "1200" + strings.Repeat("CC", 32) + "00000000" + strings.Repeat("DD", 64) + "02"
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
pkt, err := DecodePacket(hex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -295,7 +291,7 @@ func TestDecodeAdvertNoLocationNoName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGoldenFixtureTxtMsg(t *testing.T) {
|
||||
pkt, err := DecodePacket("0A00D69FD7A5A7475DB07337749AE61FA53A4788E976", nil, false)
|
||||
pkt, err := DecodePacket("0A00D69FD7A5A7475DB07337749AE61FA53A4788E976", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -318,7 +314,7 @@ func TestGoldenFixtureTxtMsg(t *testing.T) {
|
||||
|
||||
func TestGoldenFixtureAdvert(t *testing.T) {
|
||||
rawHex := "120046D62DE27D4C5194D7821FC5A34A45565DCC2537B300B9AB6275255CEFB65D840CE5C169C94C9AED39E8BCB6CB6EB0335497A198B33A1A610CD3B03D8DCFC160900E5244280323EE0B44CACAB8F02B5B38B91CFA18BD067B0B5E63E94CFC85F758A8530B9240933402E0E6B8F84D5252322D52"
|
||||
pkt, err := DecodePacket(rawHex, nil, false)
|
||||
pkt, err := DecodePacket(rawHex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -341,7 +337,7 @@ func TestGoldenFixtureAdvert(t *testing.T) {
|
||||
|
||||
func TestGoldenFixtureUnicodeAdvert(t *testing.T) {
|
||||
rawHex := "120073CFF971E1CB5754A742C152B2D2E0EB108A19B246D663ED8898A72C4A5AD86EA6768E66694B025EDF6939D5C44CFF719C5D5520E5F06B20680A83AD9C2C61C3227BBB977A85EE462F3553445FECF8EDD05C234ECE217272E503F14D6DF2B1B9B133890C923CDF3002F8FDC1F85045414BF09F8CB3"
|
||||
pkt, err := DecodePacket(rawHex, nil, false)
|
||||
pkt, err := DecodePacket(rawHex, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -358,14 +354,14 @@ func TestGoldenFixtureUnicodeAdvert(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecodePacketTooShort(t *testing.T) {
|
||||
_, err := DecodePacket("FF", nil, false)
|
||||
_, err := DecodePacket("FF", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error for 1-byte packet")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacketInvalidHex(t *testing.T) {
|
||||
_, err := DecodePacket("ZZZZ", nil, false)
|
||||
_, err := DecodePacket("ZZZZ", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error for invalid hex")
|
||||
}
|
||||
@@ -572,7 +568,7 @@ func TestDecodeTracePathParsing(t *testing.T) {
|
||||
// Packet from issue #276: 260001807dca00000000007d547d
|
||||
// Path byte 0x00 → hashSize=1, hops in payload at buf[9:] = 7d 54 7d
|
||||
// Expected path: ["7D", "54", "7D"]
|
||||
pkt, err := DecodePacket("260001807dca00000000007d547d", nil, false)
|
||||
pkt, err := DecodePacket("260001807dca00000000007d547d", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
@@ -594,7 +590,7 @@ func TestDecodeTracePathParsing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecodeAdvertShort(t *testing.T) {
|
||||
p := decodeAdvert(make([]byte, 50), false)
|
||||
p := decodeAdvert(make([]byte, 50))
|
||||
if p.Error != "too short for advert" {
|
||||
t.Errorf("expected 'too short for advert' error, got %q", p.Error)
|
||||
}
|
||||
@@ -632,7 +628,7 @@ func TestDecodeEncryptedPayloadValid(t *testing.T) {
|
||||
|
||||
func TestDecodePayloadGRPData(t *testing.T) {
|
||||
buf := []byte{0x01, 0x02, 0x03}
|
||||
p := decodePayload(PayloadGRP_DATA, buf, nil, false)
|
||||
p := decodePayload(PayloadGRP_DATA, buf, nil)
|
||||
if p.Type != "UNKNOWN" {
|
||||
t.Errorf("type=%s, want UNKNOWN", p.Type)
|
||||
}
|
||||
@@ -643,7 +639,7 @@ func TestDecodePayloadGRPData(t *testing.T) {
|
||||
|
||||
func TestDecodePayloadRAWCustom(t *testing.T) {
|
||||
buf := []byte{0xFF, 0xFE}
|
||||
p := decodePayload(PayloadRAW_CUSTOM, buf, nil, false)
|
||||
p := decodePayload(PayloadRAW_CUSTOM, buf, nil)
|
||||
if p.Type != "UNKNOWN" {
|
||||
t.Errorf("type=%s, want UNKNOWN", p.Type)
|
||||
}
|
||||
@@ -651,49 +647,49 @@ func TestDecodePayloadRAWCustom(t *testing.T) {
|
||||
|
||||
func TestDecodePayloadAllTypes(t *testing.T) {
|
||||
// REQ
|
||||
p := decodePayload(PayloadREQ, make([]byte, 10), nil, false)
|
||||
p := decodePayload(PayloadREQ, make([]byte, 10), nil)
|
||||
if p.Type != "REQ" {
|
||||
t.Errorf("REQ: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// RESPONSE
|
||||
p = decodePayload(PayloadRESPONSE, make([]byte, 10), nil, false)
|
||||
p = decodePayload(PayloadRESPONSE, make([]byte, 10), nil)
|
||||
if p.Type != "RESPONSE" {
|
||||
t.Errorf("RESPONSE: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// TXT_MSG
|
||||
p = decodePayload(PayloadTXT_MSG, make([]byte, 10), nil, false)
|
||||
p = decodePayload(PayloadTXT_MSG, make([]byte, 10), nil)
|
||||
if p.Type != "TXT_MSG" {
|
||||
t.Errorf("TXT_MSG: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// ACK
|
||||
p = decodePayload(PayloadACK, make([]byte, 10), nil, false)
|
||||
p = decodePayload(PayloadACK, make([]byte, 10), nil)
|
||||
if p.Type != "ACK" {
|
||||
t.Errorf("ACK: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// GRP_TXT
|
||||
p = decodePayload(PayloadGRP_TXT, make([]byte, 10), nil, false)
|
||||
p = decodePayload(PayloadGRP_TXT, make([]byte, 10), nil)
|
||||
if p.Type != "GRP_TXT" {
|
||||
t.Errorf("GRP_TXT: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// ANON_REQ
|
||||
p = decodePayload(PayloadANON_REQ, make([]byte, 40), nil, false)
|
||||
p = decodePayload(PayloadANON_REQ, make([]byte, 40), nil)
|
||||
if p.Type != "ANON_REQ" {
|
||||
t.Errorf("ANON_REQ: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// PATH
|
||||
p = decodePayload(PayloadPATH, make([]byte, 10), nil, false)
|
||||
p = decodePayload(PayloadPATH, make([]byte, 10), nil)
|
||||
if p.Type != "PATH" {
|
||||
t.Errorf("PATH: type=%s", p.Type)
|
||||
}
|
||||
|
||||
// TRACE
|
||||
p = decodePayload(PayloadTRACE, make([]byte, 20), nil, false)
|
||||
p = decodePayload(PayloadTRACE, make([]byte, 20), nil)
|
||||
if p.Type != "TRACE" {
|
||||
t.Errorf("TRACE: type=%s", p.Type)
|
||||
}
|
||||
@@ -927,96 +923,9 @@ func TestComputeContentHashLongFallback(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeContentHashRouteTypeIndependence verifies that the same logical
|
||||
// packet produces the same content hash regardless of route type (issue #786).
|
||||
func TestComputeContentHashRouteTypeIndependence(t *testing.T) {
|
||||
// Same payload type (TXT_MSG=2, bits 2-5) with different route types.
|
||||
// Header 0x08 = route_type 0 (TRANSPORT_FLOOD), payload_type 2
|
||||
// Header 0x0A = route_type 2 (DIRECT), payload_type 2
|
||||
// Header 0x09 = route_type 1 (FLOOD), payload_type 2
|
||||
// pathByte=0x00, payload=D69FD7A5A7
|
||||
payloadHex := "D69FD7A5A7"
|
||||
|
||||
// FLOOD: header=0x09 (route_type 1), pathByte=0x00
|
||||
floodHex := "09" + "00" + payloadHex
|
||||
// DIRECT: header=0x0A (route_type 2), pathByte=0x00
|
||||
directHex := "0A" + "00" + payloadHex
|
||||
|
||||
hashFlood := ComputeContentHash(floodHex)
|
||||
hashDirect := ComputeContentHash(directHex)
|
||||
if hashFlood != hashDirect {
|
||||
t.Errorf("same payload with different route types produced different hashes: flood=%s direct=%s", hashFlood, hashDirect)
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeContentHashTraceIncludesPathLen verifies TRACE packets include
|
||||
// path_len in the hash (matching firmware behavior).
|
||||
func TestComputeContentHashTraceIncludesPathLen(t *testing.T) {
|
||||
// TRACE = payload_type 0x09, so header bits 2-5 = 0x09 → header = 0x09<<2 | route=2 = 0x26
|
||||
// pathByte=0x01 (1 hop, 1-byte hash) → 1 path byte
|
||||
traceHeader1 := "26" // route=2, payload_type=9
|
||||
pathByte1 := "01"
|
||||
pathData1 := "AA"
|
||||
payload := "DEADBEEF"
|
||||
hex1 := traceHeader1 + pathByte1 + pathData1 + payload
|
||||
|
||||
// Same but pathByte=0x02 (2 hops) → 2 path bytes
|
||||
pathByte2 := "02"
|
||||
pathData2 := "AABB"
|
||||
hex2 := traceHeader1 + pathByte2 + pathData2 + payload
|
||||
|
||||
hash1 := ComputeContentHash(hex1)
|
||||
hash2 := ComputeContentHash(hex2)
|
||||
if hash1 == hash2 {
|
||||
t.Error("TRACE packets with different path_len should produce different hashes (path_len is part of hash input)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeContentHashMatchesFirmware verifies hash output matches what the
|
||||
// firmware would compute: SHA256(payload_type_byte + payload)[:16hex].
|
||||
func TestComputeContentHashMatchesFirmware(t *testing.T) {
|
||||
// header=0x0A → payload_type = (0x0A >> 2) & 0x0F = 2
|
||||
// pathByte=0x00, payload = D69FD7A5A7475DB07337749AE61FA53A4788E976
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
hash := ComputeContentHash(rawHex)
|
||||
|
||||
// Manually compute expected: SHA256(0x02 + payload_bytes)
|
||||
payloadBytes, _ := hex.DecodeString("D69FD7A5A7475DB07337749AE61FA53A4788E976")
|
||||
toHash := append([]byte{0x02}, payloadBytes...)
|
||||
expected := sha256.Sum256(toHash)
|
||||
expectedHex := hex.EncodeToString(expected[:])[:16]
|
||||
if hash != expectedHex {
|
||||
t.Errorf("hash=%s, want %s (firmware-compatible)", hash, expectedHex)
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeContentHashTraceGoldenValue is a golden-value test that locks down
|
||||
// the 2-byte path_len (uint16 LE) behavior for TRACE hashing. If anyone removes
|
||||
// the 0x00 byte from the hash input, this test breaks.
|
||||
//
|
||||
// Packet: header=0x25 (FLOOD route=1, payload_type=TRACE=0x09), pathByte=0x02
|
||||
// (2 hops, 1-byte hash), path=[AA,BB], payload=[DE,AD,BE,EF].
|
||||
// Hash input: [0x09, 0x02, 0x00, 0xDE, 0xAD, 0xBE, 0xEF]
|
||||
// → SHA256 = b1baaf3bf0d0726c2672b1ec9e2665dc...
|
||||
// → first 16 hex chars = "b1baaf3bf0d0726c"
|
||||
func TestComputeContentHashTraceGoldenValue(t *testing.T) {
|
||||
// TRACE packet: header byte 0x25 = payload_type 9 (TRACE), route_type 1 (FLOOD)
|
||||
// pathByte 0x02 = hash_size 1, hash_count 2
|
||||
// 2 path bytes (AA, BB), then payload DEADBEEF
|
||||
rawHex := "2502AABBDEADBEEF"
|
||||
hash := ComputeContentHash(rawHex)
|
||||
|
||||
// Pre-computed: SHA256(0x09 0x02 0x00 0xDE 0xAD 0xBE 0xEF)[:16hex]
|
||||
// The 0x00 is the high byte of uint16_t path_len (little-endian).
|
||||
const golden = "b1baaf3bf0d0726c"
|
||||
if hash != golden {
|
||||
t.Errorf("TRACE golden hash = %s, want %s (2-byte path_len encoding)", hash, golden)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacketWithWhitespace(t *testing.T) {
|
||||
raw := "0A 00 D6 9F D7 A5 A7 47 5D B0 73 37 74 9A E6 1F A5 3A 47 88 E9 76"
|
||||
pkt, err := DecodePacket(raw, nil, false)
|
||||
pkt, err := DecodePacket(raw, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1027,7 +936,7 @@ func TestDecodePacketWithWhitespace(t *testing.T) {
|
||||
|
||||
func TestDecodePacketWithNewlines(t *testing.T) {
|
||||
raw := "0A00\nD69F\r\nD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
pkt, err := DecodePacket(raw, nil, false)
|
||||
pkt, err := DecodePacket(raw, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1038,7 +947,7 @@ func TestDecodePacketWithNewlines(t *testing.T) {
|
||||
|
||||
func TestDecodePacketTransportRouteTooShort(t *testing.T) {
|
||||
// TRANSPORT_FLOOD (route=0) but only 2 bytes total → too short for transport codes
|
||||
_, err := DecodePacket("1400", nil, false)
|
||||
_, err := DecodePacket("1400", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error for transport route with too-short buffer")
|
||||
}
|
||||
@@ -1098,7 +1007,7 @@ func TestDecodeHeaderUnknownTypes(t *testing.T) {
|
||||
|
||||
func TestDecodePayloadMultipart(t *testing.T) {
|
||||
// MULTIPART (0x0A) falls through to default → UNKNOWN
|
||||
p := decodePayload(PayloadMULTIPART, []byte{0x01, 0x02}, nil, false)
|
||||
p := decodePayload(PayloadMULTIPART, []byte{0x01, 0x02}, nil)
|
||||
if p.Type != "UNKNOWN" {
|
||||
t.Errorf("MULTIPART type=%s, want UNKNOWN", p.Type)
|
||||
}
|
||||
@@ -1106,7 +1015,7 @@ func TestDecodePayloadMultipart(t *testing.T) {
|
||||
|
||||
func TestDecodePayloadControl(t *testing.T) {
|
||||
// CONTROL (0x0B) falls through to default → UNKNOWN
|
||||
p := decodePayload(PayloadCONTROL, []byte{0x01, 0x02}, nil, false)
|
||||
p := decodePayload(PayloadCONTROL, []byte{0x01, 0x02}, nil)
|
||||
if p.Type != "UNKNOWN" {
|
||||
t.Errorf("CONTROL type=%s, want UNKNOWN", p.Type)
|
||||
}
|
||||
@@ -1130,7 +1039,7 @@ func TestDecodePathTruncatedBuffer(t *testing.T) {
|
||||
func TestDecodeFloodAdvert5Hops(t *testing.T) {
|
||||
// From test-decoder.js Test 1
|
||||
raw := "11451000D818206D3AAC152C8A91F89957E6D30CA51F36E28790228971C473B755F244F718754CF5EE4A2FD58D944466E42CDED140C66D0CC590183E32BAF40F112BE8F3F2BDF6012B4B2793C52F1D36F69EE054D9A05593286F78453E56C0EC4A3EB95DDA2A7543FCCC00B939CACC009278603902FC12BCF84B706120526F6F6620536F6C6172"
|
||||
pkt, err := DecodePacket(raw, nil, false)
|
||||
pkt, err := DecodePacket(raw, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1501,7 +1410,7 @@ func TestDecodeAdvertWithTelemetry(t *testing.T) {
|
||||
name + nullTerm +
|
||||
hex.EncodeToString(batteryLE) + hex.EncodeToString(tempLE)
|
||||
|
||||
pkt, err := DecodePacket(hexStr, nil, false)
|
||||
pkt, err := DecodePacket(hexStr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1540,7 +1449,7 @@ func TestDecodeAdvertWithTelemetryNegativeTemp(t *testing.T) {
|
||||
name + nullTerm +
|
||||
hex.EncodeToString(batteryLE) + hex.EncodeToString(tempLE)
|
||||
|
||||
pkt, err := DecodePacket(hexStr, nil, false)
|
||||
pkt, err := DecodePacket(hexStr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1567,7 +1476,7 @@ func TestDecodeAdvertWithoutTelemetry(t *testing.T) {
|
||||
name := hex.EncodeToString([]byte("Node1"))
|
||||
|
||||
hexStr := "1200" + pubkey + timestamp + signature + flags + name
|
||||
pkt, err := DecodePacket(hexStr, nil, false)
|
||||
pkt, err := DecodePacket(hexStr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1594,7 +1503,7 @@ func TestDecodeAdvertNonSensorIgnoresTelemetryBytes(t *testing.T) {
|
||||
extraBytes := "B40ED403" // battery-like and temp-like bytes
|
||||
|
||||
hexStr := "1200" + pubkey + timestamp + signature + flags + name + nullTerm + extraBytes
|
||||
pkt, err := DecodePacket(hexStr, nil, false)
|
||||
pkt, err := DecodePacket(hexStr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1622,7 +1531,7 @@ func TestDecodeAdvertTelemetryZeroTemp(t *testing.T) {
|
||||
name + nullTerm +
|
||||
hex.EncodeToString(batteryLE) + hex.EncodeToString(tempLE)
|
||||
|
||||
pkt, err := DecodePacket(hexStr, nil, false)
|
||||
pkt, err := DecodePacket(hexStr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1633,296 +1542,3 @@ func TestDecodeAdvertTelemetryZeroTemp(t *testing.T) {
|
||||
t.Errorf("temperature_c=%f, want 0.0", *pkt.Payload.TemperatureC)
|
||||
}
|
||||
}
|
||||
|
||||
func repeatHex(byteHex string, n int) string {
|
||||
s := ""
|
||||
for i := 0; i < n; i++ {
|
||||
s += byteHex
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestZeroHopDirectHashSize(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x00 → hash_count=0, hash_size bits=0 → should get HashSize=0
|
||||
hex := "02" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("DIRECT zero-hop: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroHopDirectHashSizeWithNonZeroUpperBits(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x40 → hash_count=0, hash_size bits=01 → should still get HashSize=0
|
||||
hex := "02" + "40" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("DIRECT zero-hop with hash_size bits set: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNonDirectZeroPathByteKeepsHashSize(t *testing.T) {
|
||||
// FLOOD (RouteType=1) + REQ (PayloadType=0) → header byte = 0x01
|
||||
// pathByte=0x00 → non-DIRECT should keep HashSize=1
|
||||
hex := "01" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 1 {
|
||||
t.Errorf("FLOOD zero pathByte: want HashSize=1, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirectNonZeroHopKeepsHashSize(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x01 → hash_count=1, hash_size=1 → should keep HashSize=1
|
||||
hex := "02" + "01" + repeatHex("BB", 21)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 1 {
|
||||
t.Errorf("DIRECT with 1 hop: want HashSize=1, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroHopTransportDirectHashSize(t *testing.T) {
|
||||
// TRANSPORT_DIRECT (RouteType=3) + REQ (PayloadType=0) → header byte = 0x03
|
||||
// 4 bytes transport codes + pathByte=0x00 → hash_count=0 → should get HashSize=0
|
||||
hex := "03" + "11223344" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("TRANSPORT_DIRECT zero-hop: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroHopTransportDirectHashSizeWithNonZeroUpperBits(t *testing.T) {
|
||||
// TRANSPORT_DIRECT (RouteType=3) + REQ (PayloadType=0) → header byte = 0x03
|
||||
// 4 bytes transport codes + pathByte=0xC0 → hash_count=0, hash_size bits=11 → should still get HashSize=0
|
||||
hex := "03" + "11223344" + "C0" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("TRANSPORT_DIRECT zero-hop with hash_size bits set: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAdvertSignature(t *testing.T) {
|
||||
// Generate a real ed25519 key pair
|
||||
pub, priv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var timestamp uint32 = 1234567890
|
||||
appdata := []byte{0x02, 0x11, 0x22} // flags + some data
|
||||
|
||||
// Build the signed message: pubKey + timestamp(LE) + appdata
|
||||
message := make([]byte, 32+4+len(appdata))
|
||||
copy(message[0:32], pub)
|
||||
binary.LittleEndian.PutUint32(message[32:36], timestamp)
|
||||
copy(message[36:], appdata)
|
||||
|
||||
sig := ed25519.Sign(priv, message)
|
||||
|
||||
// Valid signature
|
||||
valid, err := sigvalidate.ValidateAdvert([]byte(pub), sig, timestamp, appdata)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !valid {
|
||||
t.Error("expected valid signature")
|
||||
}
|
||||
|
||||
// Tampered appdata → invalid
|
||||
badAppdata := []byte{0x03, 0x11, 0x22}
|
||||
valid, err = sigvalidate.ValidateAdvert([]byte(pub), sig, timestamp, badAppdata)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if valid {
|
||||
t.Error("expected invalid signature with tampered appdata")
|
||||
}
|
||||
|
||||
// Wrong timestamp → invalid
|
||||
valid, err = sigvalidate.ValidateAdvert([]byte(pub), sig, timestamp+1, appdata)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if valid {
|
||||
t.Error("expected invalid signature with wrong timestamp")
|
||||
}
|
||||
|
||||
// Wrong length pubkey
|
||||
_, err = sigvalidate.ValidateAdvert([]byte{0xAA, 0xBB}, sig, timestamp, appdata)
|
||||
if err == nil {
|
||||
t.Error("expected error for short pubkey")
|
||||
}
|
||||
|
||||
// Wrong length signature
|
||||
_, err = sigvalidate.ValidateAdvert([]byte(pub), []byte{0xAA, 0xBB}, timestamp, appdata)
|
||||
if err == nil {
|
||||
t.Error("expected error for short signature")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeAdvertWithSignatureValidation(t *testing.T) {
|
||||
// Generate key pair
|
||||
pub, priv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var timestamp uint32 = 1000000
|
||||
appdata := []byte{0x02} // repeater type, no location
|
||||
|
||||
// Build signed message
|
||||
message := make([]byte, 32+4+len(appdata))
|
||||
copy(message[0:32], pub)
|
||||
binary.LittleEndian.PutUint32(message[32:36], timestamp)
|
||||
copy(message[36:], appdata)
|
||||
sig := ed25519.Sign(priv, message)
|
||||
|
||||
// Build advert buffer: pubkey(32) + timestamp(4) + signature(64) + appdata
|
||||
buf := make([]byte, 0, 101)
|
||||
buf = append(buf, pub...)
|
||||
ts := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(ts, timestamp)
|
||||
buf = append(buf, ts...)
|
||||
buf = append(buf, sig...)
|
||||
buf = append(buf, appdata...)
|
||||
|
||||
// With validation enabled
|
||||
p := decodeAdvert(buf, true)
|
||||
if p.Error != "" {
|
||||
t.Fatalf("decode error: %s", p.Error)
|
||||
}
|
||||
if p.SignatureValid == nil {
|
||||
t.Fatal("SignatureValid should be set when validation enabled")
|
||||
}
|
||||
if !*p.SignatureValid {
|
||||
t.Error("expected valid signature")
|
||||
}
|
||||
|
||||
// Without validation
|
||||
p2 := decodeAdvert(buf, false)
|
||||
if p2.SignatureValid != nil {
|
||||
t.Error("SignatureValid should be nil when validation disabled")
|
||||
}
|
||||
}
|
||||
|
||||
// === Tests for DecodePathFromRawHex (issue #886) ===
|
||||
|
||||
func TestDecodePathFromRawHex_HashSize1(t *testing.T) {
|
||||
// Header byte 0x26 = route_type DIRECT, payload TRACE
|
||||
// Path byte 0x04 = hash_size 1 (bits 7-6 = 00 → 0+1=1), hash_count 4
|
||||
// Path bytes: 30 2D 0D 23
|
||||
raw := "2604302D0D2359FEE7B100000000006733D63367"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := []string{"30", "2D", "0D", "23"}
|
||||
if len(hops) != len(expected) {
|
||||
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
|
||||
}
|
||||
for i, h := range hops {
|
||||
if h != expected[i] {
|
||||
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_HashSize2(t *testing.T) {
|
||||
// Path byte 0x42 = hash_size 2 (bits 7-6 = 01 → 1+1=2), hash_count 2
|
||||
// Header 0x09 = FLOOD route (rt=1), payload ADVERT (pt=2)
|
||||
// Path bytes: AABB CCDD (4 bytes = 2 hops * 2 bytes)
|
||||
raw := "0942AABBCCDD" + "00000000000000"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := []string{"AABB", "CCDD"}
|
||||
if len(hops) != len(expected) {
|
||||
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
|
||||
}
|
||||
for i, h := range hops {
|
||||
if h != expected[i] {
|
||||
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_HashSize3(t *testing.T) {
|
||||
// Path byte 0x81 = hash_size 3 (bits 7-6 = 10 → 2+1=3), hash_count 1
|
||||
// Header 0x09 = FLOOD route (rt=1), payload ADVERT
|
||||
raw := "0981AABBCC" + "0000000000"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(hops) != 1 || hops[0] != "AABBCC" {
|
||||
t.Fatalf("got %v, want [AABBCC]", hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_HashSize4(t *testing.T) {
|
||||
// Path byte 0xC1 = hash_size 4 (bits 7-6 = 11 → 3+1=4), hash_count 1
|
||||
// Header 0x09 = FLOOD route (rt=1)
|
||||
raw := "09C1AABBCCDD" + "0000000000"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(hops) != 1 || hops[0] != "AABBCCDD" {
|
||||
t.Fatalf("got %v, want [AABBCCDD]", hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_DirectZeroHops(t *testing.T) {
|
||||
// Path byte 0x00 = hash_size 1, hash_count 0
|
||||
// Header 0x0A = DIRECT route (rt=2), payload ADVERT
|
||||
raw := "0A00" + "0000000000"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(hops) != 0 {
|
||||
t.Fatalf("got %d hops, want 0", len(hops))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_Transport(t *testing.T) {
|
||||
// Route type 3 = TRANSPORT_DIRECT → 4 transport code bytes before path byte
|
||||
// Header 0x27 = route_type 3, payload TRACE
|
||||
// Transport codes: 1122 3344
|
||||
// Path byte 0x02 = hash_size 1, hash_count 2
|
||||
// Path bytes: AA BB
|
||||
raw := "2711223344" + "02AABB" + "0000000000"
|
||||
hops, err := packetpath.DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := []string{"AA", "BB"}
|
||||
if len(hops) != len(expected) {
|
||||
t.Fatalf("got %d hops, want %d", len(hops), len(expected))
|
||||
}
|
||||
for i, h := range hops {
|
||||
if h != expected[i] {
|
||||
t.Errorf("hop[%d] = %s, want %s", i, h, expected[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,18 +5,11 @@ go 1.22
|
||||
require (
|
||||
github.com/eclipse/paho.mqtt.golang v1.5.0
|
||||
github.com/meshcore-analyzer/geofilter v0.0.0
|
||||
github.com/meshcore-analyzer/sigvalidate v0.0.0
|
||||
modernc.org/sqlite v1.34.5
|
||||
)
|
||||
|
||||
replace github.com/meshcore-analyzer/geofilter => ../../internal/geofilter
|
||||
|
||||
replace github.com/meshcore-analyzer/sigvalidate => ../../internal/sigvalidate
|
||||
|
||||
require github.com/meshcore-analyzer/packetpath v0.0.0
|
||||
|
||||
replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath
|
||||
|
||||
require (
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
|
||||
+44
-106
@@ -57,42 +57,19 @@ func main() {
|
||||
defer store.Close()
|
||||
log.Printf("SQLite opened: %s", cfg.DBPath)
|
||||
|
||||
// Check auto_vacuum mode and optionally migrate (#919)
|
||||
store.CheckAutoVacuum(cfg)
|
||||
|
||||
// Node retention: move stale nodes to inactive_nodes on startup
|
||||
nodeDays := cfg.NodeDaysOrDefault()
|
||||
store.MoveStaleNodes(nodeDays)
|
||||
|
||||
// Observer retention: remove stale observers on startup
|
||||
observerDays := cfg.ObserverDaysOrDefault()
|
||||
store.RemoveStaleObservers(observerDays)
|
||||
|
||||
// Metrics retention: prune old metrics on startup
|
||||
metricsDays := cfg.MetricsRetentionDays()
|
||||
store.PruneOldMetrics(metricsDays)
|
||||
store.PruneDroppedPackets(metricsDays)
|
||||
vacuumPages := cfg.IncrementalVacuumPages()
|
||||
store.RunIncrementalVacuum(vacuumPages)
|
||||
|
||||
// Daily ticker for node retention
|
||||
retentionTicker := time.NewTicker(1 * time.Hour)
|
||||
go func() {
|
||||
for range retentionTicker.C {
|
||||
store.MoveStaleNodes(nodeDays)
|
||||
store.RunIncrementalVacuum(vacuumPages)
|
||||
}
|
||||
}()
|
||||
|
||||
// Daily ticker for observer retention (every 24h, staggered 90s after startup)
|
||||
observerRetentionTicker := time.NewTicker(24 * time.Hour)
|
||||
go func() {
|
||||
time.Sleep(90 * time.Second) // stagger after metrics prune
|
||||
store.RemoveStaleObservers(observerDays)
|
||||
store.RunIncrementalVacuum(vacuumPages)
|
||||
for range observerRetentionTicker.C {
|
||||
store.RemoveStaleObservers(observerDays)
|
||||
store.RunIncrementalVacuum(vacuumPages)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -101,8 +78,6 @@ func main() {
|
||||
go func() {
|
||||
for range metricsRetentionTicker.C {
|
||||
store.PruneOldMetrics(metricsDays)
|
||||
store.PruneDroppedPackets(metricsDays)
|
||||
store.RunIncrementalVacuum(vacuumPages)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -129,7 +104,23 @@ func main() {
|
||||
tag = source.Broker
|
||||
}
|
||||
|
||||
opts := buildMQTTOpts(source)
|
||||
opts := mqtt.NewClientOptions().
|
||||
AddBroker(source.Broker).
|
||||
SetAutoReconnect(true).
|
||||
SetConnectRetry(true).
|
||||
SetOrderMatters(true)
|
||||
|
||||
if source.Username != "" {
|
||||
opts.SetUsername(source.Username)
|
||||
}
|
||||
if source.Password != "" {
|
||||
opts.SetPassword(source.Password)
|
||||
}
|
||||
if source.RejectUnauthorized != nil && !*source.RejectUnauthorized {
|
||||
opts.SetTLSConfig(&tls.Config{InsecureSkipVerify: true})
|
||||
} else if strings.HasPrefix(source.Broker, "ssl://") {
|
||||
opts.SetTLSConfig(&tls.Config{})
|
||||
}
|
||||
|
||||
opts.SetOnConnectHandler(func(c mqtt.Client) {
|
||||
log.Printf("MQTT [%s] connected to %s", tag, source.Broker)
|
||||
@@ -149,17 +140,13 @@ func main() {
|
||||
})
|
||||
|
||||
opts.SetConnectionLostHandler(func(c mqtt.Client, err error) {
|
||||
log.Printf("MQTT [%s] disconnected from %s: %v", tag, source.Broker, err)
|
||||
})
|
||||
|
||||
opts.SetReconnectingHandler(func(c mqtt.Client, options *mqtt.ClientOptions) {
|
||||
log.Printf("MQTT [%s] reconnecting to %s", tag, source.Broker)
|
||||
log.Printf("MQTT [%s] disconnected: %v", tag, err)
|
||||
})
|
||||
|
||||
// Capture source for closure
|
||||
src := source
|
||||
opts.SetDefaultPublishHandler(func(c mqtt.Client, m mqtt.Message) {
|
||||
handleMessage(store, tag, src, m, channelKeys, cfg)
|
||||
handleMessage(store, tag, src, m, channelKeys, cfg.GeoFilter)
|
||||
})
|
||||
|
||||
client := mqtt.NewClient(opts)
|
||||
@@ -194,33 +181,7 @@ func main() {
|
||||
log.Println("Done.")
|
||||
}
|
||||
|
||||
// buildMQTTOpts creates MQTT client options for a source with bounded reconnect
|
||||
// backoff, connect timeout, and TLS/auth configuration.
|
||||
func buildMQTTOpts(source MQTTSource) *mqtt.ClientOptions {
|
||||
opts := mqtt.NewClientOptions().
|
||||
AddBroker(source.Broker).
|
||||
SetAutoReconnect(true).
|
||||
SetConnectRetry(true).
|
||||
SetOrderMatters(true).
|
||||
SetMaxReconnectInterval(30 * time.Second).
|
||||
SetConnectTimeout(10 * time.Second).
|
||||
SetWriteTimeout(10 * time.Second)
|
||||
|
||||
if source.Username != "" {
|
||||
opts.SetUsername(source.Username)
|
||||
}
|
||||
if source.Password != "" {
|
||||
opts.SetPassword(source.Password)
|
||||
}
|
||||
if source.RejectUnauthorized != nil && !*source.RejectUnauthorized {
|
||||
opts.SetTLSConfig(&tls.Config{InsecureSkipVerify: true})
|
||||
} else if strings.HasPrefix(source.Broker, "ssl://") {
|
||||
opts.SetTLSConfig(&tls.Config{})
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message, channelKeys map[string]string, cfg *Config) {
|
||||
func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message, channelKeys map[string]string, geoFilter *GeoFilterConfig) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("MQTT [%s] panic in handler: %v", tag, r)
|
||||
@@ -230,6 +191,21 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
|
||||
topic := m.Topic()
|
||||
parts := strings.Split(topic, "/")
|
||||
|
||||
// IATA filter
|
||||
if len(source.IATAFilter) > 0 && len(parts) > 1 {
|
||||
region := parts[1]
|
||||
matched := false
|
||||
for _, f := range source.IATAFilter {
|
||||
if f == region {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !matched {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var msg map[string]interface{}
|
||||
if err := json.Unmarshal(m.Payload(), &msg); err != nil {
|
||||
return
|
||||
@@ -241,9 +217,6 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
|
||||
}
|
||||
|
||||
// Status topic: meshcore/<region>/<observer_id>/status
|
||||
// IATA filter does NOT apply here — observer metadata (noise_floor, battery, etc.)
|
||||
// is region-independent and should be accepted from all observers regardless of
|
||||
// which IATA regions are configured for packet ingestion.
|
||||
if len(parts) >= 4 && parts[3] == "status" {
|
||||
observerID := parts[2]
|
||||
name, _ := msg["origin"].(string)
|
||||
@@ -272,26 +245,10 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
|
||||
return
|
||||
}
|
||||
|
||||
// IATA filter applies to packet messages only — not status messages above.
|
||||
if len(source.IATAFilter) > 0 && len(parts) > 1 {
|
||||
region := parts[1]
|
||||
matched := false
|
||||
for _, f := range source.IATAFilter {
|
||||
if f == region {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !matched {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Format 1: Raw packet (meshcoretomqtt / Cisien format)
|
||||
rawHex, _ := msg["raw"].(string)
|
||||
if rawHex != "" {
|
||||
validateSigs := cfg.ShouldValidateSignatures()
|
||||
decoded, err := DecodePacket(rawHex, channelKeys, validateSigs)
|
||||
decoded, err := DecodePacket(rawHex, channelKeys)
|
||||
if err != nil {
|
||||
log.Printf("MQTT [%s] decode error: %v", tag, err)
|
||||
return
|
||||
@@ -351,27 +308,7 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
|
||||
log.Printf("MQTT [%s] skipping corrupted ADVERT: %s", tag, reason)
|
||||
return
|
||||
}
|
||||
// Signature validation: drop adverts with invalid ed25519 signatures
|
||||
if validateSigs && decoded.Payload.SignatureValid != nil && !*decoded.Payload.SignatureValid {
|
||||
hash := ComputeContentHash(rawHex)
|
||||
truncPK := decoded.Payload.PubKey
|
||||
if len(truncPK) > 16 {
|
||||
truncPK = truncPK[:16]
|
||||
}
|
||||
log.Printf("MQTT [%s] DROPPED invalid signature: hash=%s name=%s observer=%s pubkey=%s",
|
||||
tag, hash, decoded.Payload.Name, firstNonEmpty(mqttMsg.Origin, observerID), truncPK)
|
||||
store.InsertDroppedPacket(&DroppedPacket{
|
||||
Hash: hash,
|
||||
RawHex: rawHex,
|
||||
Reason: "invalid signature",
|
||||
ObserverID: observerID,
|
||||
ObserverName: mqttMsg.Origin,
|
||||
NodePubKey: decoded.Payload.PubKey,
|
||||
NodeName: decoded.Payload.Name,
|
||||
})
|
||||
return
|
||||
}
|
||||
if !NodePassesGeoFilter(decoded.Payload.Lat, decoded.Payload.Lon, cfg.GeoFilter) {
|
||||
if !NodePassesGeoFilter(decoded.Payload.Lat, decoded.Payload.Lon, geoFilter) {
|
||||
return
|
||||
}
|
||||
pktData := BuildPacketData(mqttMsg, decoded, observerID, region)
|
||||
@@ -503,18 +440,19 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
|
||||
PayloadType: 5, // GRP_TXT
|
||||
PathJSON: "[]",
|
||||
DecodedJSON: string(decodedJSON),
|
||||
ChannelHash: channelName, // fast channel queries (#762)
|
||||
}
|
||||
|
||||
if _, err := store.InsertTransmission(pktData); err != nil {
|
||||
log.Printf("MQTT [%s] channel insert error: %v", tag, err)
|
||||
}
|
||||
|
||||
// Note: we intentionally do NOT create a node entry for channel message senders.
|
||||
// Channel messages don't carry the sender's real pubkey, so any entry we create
|
||||
// would use a synthetic key ("sender-<name>") that doesn't match the real pubkey
|
||||
// used for claiming/health lookups. The node will get a proper entry when it
|
||||
// sends an advert. See issue #665.
|
||||
// Upsert sender as a companion node
|
||||
if sender != "" {
|
||||
senderKey := "sender-" + strings.ToLower(sender)
|
||||
if err := store.UpsertNode(senderKey, sender, "companion", nil, nil, now); err != nil {
|
||||
log.Printf("MQTT [%s] sender node upsert error: %v", tag, err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("MQTT [%s] channel message: ch%s from %s", tag, channelIdx, firstNonEmpty(sender, "unknown"))
|
||||
return
|
||||
|
||||
+22
-63
@@ -130,7 +130,7 @@ func TestHandleMessageRawPacket(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `","SNR":5.5,"RSSI":-100.0,"origin":"myobs"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -147,7 +147,7 @@ func TestHandleMessageRawPacketAdvert(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
// Should create a node from the ADVERT
|
||||
var count int
|
||||
@@ -169,7 +169,7 @@ func TestHandleMessageInvalidJSON(t *testing.T) {
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: []byte(`not json`)}
|
||||
|
||||
// Should not panic
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -186,7 +186,7 @@ func TestHandleMessageStatusTopic(t *testing.T) {
|
||||
payload: []byte(`{"origin":"MyObserver"}`),
|
||||
}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var name, iata string
|
||||
err := store.db.QueryRow("SELECT name, iata FROM observers WHERE id = 'obs1'").Scan(&name, &iata)
|
||||
@@ -207,11 +207,11 @@ func TestHandleMessageSkipStatusTopics(t *testing.T) {
|
||||
|
||||
// meshcore/status should be skipped
|
||||
msg1 := &mockMessage{topic: "meshcore/status", payload: []byte(`{"raw":"0A00"}`)}
|
||||
handleMessage(store, "test", source, msg1, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg1, nil, nil)
|
||||
|
||||
// meshcore/events/connection should be skipped
|
||||
msg2 := &mockMessage{topic: "meshcore/events/connection", payload: []byte(`{"raw":"0A00"}`)}
|
||||
handleMessage(store, "test", source, msg2, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg2, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -230,7 +230,7 @@ func TestHandleMessageIATAFilter(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -243,7 +243,7 @@ func TestHandleMessageIATAFilter(t *testing.T) {
|
||||
topic: "meshcore/LAX/obs2/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg2, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg2, nil, nil)
|
||||
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
if count != 1 {
|
||||
@@ -261,7 +261,7 @@ func TestHandleMessageIATAFilterNoRegion(t *testing.T) {
|
||||
topic: "meshcore",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
// No region part → filter doesn't apply, message goes through
|
||||
// Actually the code checks len(parts) > 1 for IATA filter
|
||||
@@ -277,7 +277,7 @@ func TestHandleMessageNoRawHex(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"type":"companion","data":"something"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -295,7 +295,7 @@ func TestHandleMessageBadRawHex(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"ZZZZ"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -312,7 +312,7 @@ func TestHandleMessageWithSNRRSSIAsNumbers(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `","SNR":7.2,"RSSI":-95}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var snr, rssi *float64
|
||||
store.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr, &rssi)
|
||||
@@ -331,7 +331,7 @@ func TestHandleMessageMinimalTopic(t *testing.T) {
|
||||
topic: "meshcore/SJC",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -352,7 +352,7 @@ func TestHandleMessageCorruptedAdvert(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
// Transmission should be inserted (even if advert is invalid)
|
||||
var count int
|
||||
@@ -378,7 +378,7 @@ func TestHandleMessageNoObserverID(t *testing.T) {
|
||||
topic: "packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `","origin":"obs1"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -400,7 +400,7 @@ func TestHandleMessageSNRNotFloat(t *testing.T) {
|
||||
// SNR as a string value — should not parse as float
|
||||
payload := []byte(`{"raw":"` + rawHex + `","SNR":"bad","RSSI":"bad"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
@@ -416,7 +416,7 @@ func TestHandleMessageOriginExtraction(t *testing.T) {
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
payload := []byte(`{"raw":"` + rawHex + `","origin":"MyOrigin"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
// Verify origin was extracted to observer name
|
||||
var name string
|
||||
@@ -439,7 +439,7 @@ func TestHandleMessagePanicRecovery(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should not panic — the defer/recover should catch it
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
}
|
||||
|
||||
func TestHandleMessageStatusOriginFallback(t *testing.T) {
|
||||
@@ -451,7 +451,7 @@ func TestHandleMessageStatusOriginFallback(t *testing.T) {
|
||||
topic: "meshcore/SJC/obs1/status",
|
||||
payload: []byte(`{"type":"status"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var name string
|
||||
err := store.db.QueryRow("SELECT name FROM observers WHERE id = 'obs1'").Scan(&name)
|
||||
@@ -640,7 +640,7 @@ func TestHandleMessageWithLowercaseSNRRSSI(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `","snr":5.5,"rssi":-102}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var snr, rssi *float64
|
||||
store.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr, &rssi)
|
||||
@@ -661,7 +661,7 @@ func TestHandleMessageSNRRSSIUppercaseWins(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `","SNR":7.2,"snr":1.0,"RSSI":-95,"rssi":-50}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var snr, rssi *float64
|
||||
store.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr, &rssi)
|
||||
@@ -681,7 +681,7 @@ func TestHandleMessageNoSNRRSSI(t *testing.T) {
|
||||
payload := []byte(`{"raw":"` + rawHex + `"}`)
|
||||
msg := &mockMessage{topic: "meshcore/SJC/obs1/packets", payload: payload}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
handleMessage(store, "test", source, msg, nil, nil)
|
||||
|
||||
var snr, rssi *float64
|
||||
store.db.QueryRow("SELECT snr, rssi FROM observations LIMIT 1").Scan(&snr, &rssi)
|
||||
@@ -739,44 +739,3 @@ func TestToFloat64WithUnits(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIATAFilterDoesNotDropStatusMessages verifies that status messages from
|
||||
// out-of-region observers are still processed (noise_floor, battery, etc.)
|
||||
// even when an IATA filter is configured for packet data.
|
||||
func TestIATAFilterDoesNotDropStatusMessages(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
source := MQTTSource{Name: "test", IATAFilter: []string{"SJC"}}
|
||||
|
||||
// BFL observer sends a status message with noise_floor — outside the IATA filter.
|
||||
msg := &mockMessage{
|
||||
topic: "meshcore/BFL/bfl-obs1/status",
|
||||
payload: []byte(`{"origin":"BFLObserver","stats":{"noise_floor":-105.0}}`),
|
||||
}
|
||||
handleMessage(store, "test", source, msg, nil, &Config{})
|
||||
|
||||
var name string
|
||||
var noiseFloor *float64
|
||||
err := store.db.QueryRow("SELECT name, noise_floor FROM observers WHERE id = 'bfl-obs1'").Scan(&name, &noiseFloor)
|
||||
if err != nil {
|
||||
t.Fatalf("observer not found after status from out-of-region observer: %v", err)
|
||||
}
|
||||
if name != "BFLObserver" {
|
||||
t.Errorf("name=%q, want BFLObserver", name)
|
||||
}
|
||||
if noiseFloor == nil || *noiseFloor != -105.0 {
|
||||
t.Errorf("noise_floor=%v, want -105.0 — status message was dropped by IATA filter when it should not be", noiseFloor)
|
||||
}
|
||||
|
||||
// Verify that a packet from BFL is still filtered.
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
pktMsg := &mockMessage{
|
||||
topic: "meshcore/BFL/bfl-obs1/packets",
|
||||
payload: []byte(`{"raw":"` + rawHex + `"}`),
|
||||
}
|
||||
handleMessage(store, "test", source, pktMsg, nil, &Config{})
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
if count != 0 {
|
||||
t.Error("packet from out-of-region BFL should still be filtered by IATA")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestBuildMQTTOpts_ReconnectSettings(t *testing.T) {
|
||||
source := MQTTSource{
|
||||
Broker: "tcp://localhost:1883",
|
||||
Name: "test",
|
||||
}
|
||||
opts := buildMQTTOpts(source)
|
||||
|
||||
if opts.MaxReconnectInterval != 30*time.Second {
|
||||
t.Errorf("MaxReconnectInterval = %v, want 30s", opts.MaxReconnectInterval)
|
||||
}
|
||||
if opts.ConnectTimeout != 10*time.Second {
|
||||
t.Errorf("ConnectTimeout = %v, want 10s", opts.ConnectTimeout)
|
||||
}
|
||||
if opts.WriteTimeout != 10*time.Second {
|
||||
t.Errorf("WriteTimeout = %v, want 10s", opts.WriteTimeout)
|
||||
}
|
||||
if !opts.AutoReconnect {
|
||||
t.Error("AutoReconnect should be true")
|
||||
}
|
||||
if !opts.ConnectRetry {
|
||||
t.Error("ConnectRetry should be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMQTTOpts_Credentials(t *testing.T) {
|
||||
source := MQTTSource{
|
||||
Broker: "tcp://broker:1883",
|
||||
Username: "user1",
|
||||
Password: "pass1",
|
||||
}
|
||||
opts := buildMQTTOpts(source)
|
||||
|
||||
if opts.Username != "user1" {
|
||||
t.Errorf("Username = %q, want %q", opts.Username, "user1")
|
||||
}
|
||||
if opts.Password != "pass1" {
|
||||
t.Errorf("Password = %q, want %q", opts.Password, "pass1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMQTTOpts_TLS_InsecureSkipVerify(t *testing.T) {
|
||||
f := false
|
||||
source := MQTTSource{
|
||||
Broker: "ssl://broker:8883",
|
||||
RejectUnauthorized: &f,
|
||||
}
|
||||
opts := buildMQTTOpts(source)
|
||||
|
||||
if opts.TLSConfig == nil {
|
||||
t.Fatal("TLSConfig should be set")
|
||||
}
|
||||
if !opts.TLSConfig.InsecureSkipVerify {
|
||||
t.Error("InsecureSkipVerify should be true when RejectUnauthorized=false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMQTTOpts_TLS_SSL_Prefix(t *testing.T) {
|
||||
source := MQTTSource{
|
||||
Broker: "ssl://broker:8883",
|
||||
}
|
||||
opts := buildMQTTOpts(source)
|
||||
|
||||
if opts.TLSConfig == nil {
|
||||
t.Fatal("TLSConfig should be set for ssl:// brokers")
|
||||
}
|
||||
if opts.TLSConfig.InsecureSkipVerify {
|
||||
t.Error("InsecureSkipVerify should be false by default")
|
||||
}
|
||||
}
|
||||
@@ -1,339 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// buildAdvertHex constructs a full ADVERT packet hex string.
|
||||
// header(1) + pathByte(1) + pubkey(32) + timestamp(4) + signature(64) + appdata
|
||||
func buildAdvertHex(pubKey ed25519.PublicKey, privKey ed25519.PrivateKey, timestamp uint32, appdata []byte) string {
|
||||
// Build signed message: pubkey(32) + timestamp(4 LE) + appdata
|
||||
msg := make([]byte, 32+4+len(appdata))
|
||||
copy(msg[0:32], pubKey)
|
||||
binary.LittleEndian.PutUint32(msg[32:36], timestamp)
|
||||
copy(msg[36:], appdata)
|
||||
|
||||
sig := ed25519.Sign(privKey, msg)
|
||||
|
||||
// Payload: pubkey(32) + timestamp(4) + signature(64) + appdata
|
||||
payload := make([]byte, 0, 100+len(appdata))
|
||||
payload = append(payload, pubKey...)
|
||||
ts := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(ts, timestamp)
|
||||
payload = append(payload, ts...)
|
||||
payload = append(payload, sig...)
|
||||
payload = append(payload, appdata...)
|
||||
|
||||
// Header: ADVERT (0x04 << 2) | FLOOD (1) = 0x11, pathByte=0 (no hops)
|
||||
header := byte(0x11)
|
||||
pathByte := byte(0x00)
|
||||
|
||||
pkt := append([]byte{header, pathByte}, payload...)
|
||||
return hex.EncodeToString(pkt)
|
||||
}
|
||||
|
||||
// makeAppdata builds minimal appdata: flags(1) + name
|
||||
func makeAppdata(name string) []byte {
|
||||
flags := byte(0x81) // hasName=true, type=companion(1)
|
||||
data := []byte{flags}
|
||||
data = append(data, []byte(name)...)
|
||||
data = append(data, 0x00) // null terminator
|
||||
return data
|
||||
}
|
||||
|
||||
func TestSigValidation_ValidAdvertStored(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
appdata := makeAppdata("TestNode")
|
||||
rawHex := buildAdvertHex(pub, priv, 1700000000, appdata)
|
||||
|
||||
source := MQTTSource{Name: "test"}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+rawHex+`","origin":"TestObs"}`)
|
||||
cfg := &Config{}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
|
||||
// Verify packet was stored
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&count)
|
||||
if count == 0 {
|
||||
t.Fatal("valid advert should be stored, got 0 transmissions")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigValidation_TamperedSignatureDropped(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
appdata := makeAppdata("BadNode")
|
||||
rawHex := buildAdvertHex(pub, priv, 1700000000, appdata)
|
||||
|
||||
// Tamper with signature (flip a byte in the signature area)
|
||||
// Signature starts at offset 2 (header+path) + 32 (pubkey) + 4 (timestamp) = 38
|
||||
// That's byte 38 in the packet, hex chars 76-77
|
||||
rawBytes := []byte(rawHex)
|
||||
if rawBytes[76] == '0' {
|
||||
rawBytes[76] = 'f'
|
||||
} else {
|
||||
rawBytes[76] = '0'
|
||||
}
|
||||
tamperedHex := string(rawBytes)
|
||||
|
||||
source := MQTTSource{Name: "test"}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+tamperedHex+`","origin":"TestObs"}`)
|
||||
cfg := &Config{}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
|
||||
// Verify packet was NOT stored in transmissions
|
||||
var txCount int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&txCount)
|
||||
if txCount != 0 {
|
||||
t.Fatalf("tampered advert should be dropped, got %d transmissions", txCount)
|
||||
}
|
||||
|
||||
// Verify it was recorded in dropped_packets
|
||||
var dropCount int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM dropped_packets").Scan(&dropCount)
|
||||
if dropCount == 0 {
|
||||
t.Fatal("tampered advert should be recorded in dropped_packets")
|
||||
}
|
||||
|
||||
// Verify drop counter incremented
|
||||
if store.Stats.SignatureDrops.Load() != 1 {
|
||||
t.Fatalf("expected 1 signature drop, got %d", store.Stats.SignatureDrops.Load())
|
||||
}
|
||||
|
||||
// Verify dropped_packets has correct fields
|
||||
var reason, nodeKey, nodeName, obsID string
|
||||
store.db.QueryRow("SELECT reason, node_pubkey, node_name, observer_id FROM dropped_packets LIMIT 1").Scan(&reason, &nodeKey, &nodeName, &obsID)
|
||||
if reason != "invalid signature" {
|
||||
t.Fatalf("expected reason 'invalid signature', got %q", reason)
|
||||
}
|
||||
if nodeKey == "" {
|
||||
t.Fatal("dropped packet should have node_pubkey")
|
||||
}
|
||||
if !strings.Contains(nodeName, "BadNode") {
|
||||
t.Fatalf("expected node_name to contain 'BadNode', got %q", nodeName)
|
||||
}
|
||||
if obsID != "obs1" {
|
||||
t.Fatalf("expected observer_id 'obs1', got %q", obsID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigValidation_TruncatedAppdataDropped(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
appdata := makeAppdata("TruncNode")
|
||||
rawHex := buildAdvertHex(pub, priv, 1700000000, appdata)
|
||||
|
||||
// Sign was computed with full appdata. Now truncate the raw hex to remove
|
||||
// some appdata bytes, making the signature invalid.
|
||||
// Truncate last 4 hex chars (2 bytes of appdata)
|
||||
truncatedHex := rawHex[:len(rawHex)-4]
|
||||
|
||||
source := MQTTSource{Name: "test"}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+truncatedHex+`","origin":"TestObs"}`)
|
||||
cfg := &Config{}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
|
||||
var txCount int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&txCount)
|
||||
if txCount != 0 {
|
||||
t.Fatalf("truncated advert should be dropped, got %d transmissions", txCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigValidation_DisabledByConfig(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
appdata := makeAppdata("NoValNode")
|
||||
rawHex := buildAdvertHex(pub, priv, 1700000000, appdata)
|
||||
|
||||
// Tamper with signature
|
||||
rawBytes := []byte(rawHex)
|
||||
if rawBytes[76] == '0' {
|
||||
rawBytes[76] = 'f'
|
||||
} else {
|
||||
rawBytes[76] = '0'
|
||||
}
|
||||
tamperedHex := string(rawBytes)
|
||||
|
||||
source := MQTTSource{Name: "test"}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+tamperedHex+`","origin":"TestObs"}`)
|
||||
falseVal := false
|
||||
cfg := &Config{ValidateSignatures: &falseVal}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
|
||||
// With validation disabled, tampered packet should be stored
|
||||
var txCount int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM transmissions").Scan(&txCount)
|
||||
if txCount == 0 {
|
||||
t.Fatal("with validateSignatures=false, tampered advert should be stored")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigValidation_DropCounterIncrements(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
source := MQTTSource{Name: "test"}
|
||||
cfg := &Config{}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
appdata := makeAppdata("Node")
|
||||
rawHex := buildAdvertHex(pub, priv, uint32(1700000000+i), appdata)
|
||||
// Tamper
|
||||
rawBytes := []byte(rawHex)
|
||||
if rawBytes[76] == '0' {
|
||||
rawBytes[76] = 'f'
|
||||
} else {
|
||||
rawBytes[76] = '0'
|
||||
}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+string(rawBytes)+`","origin":"Obs"}`)
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
}
|
||||
|
||||
if store.Stats.SignatureDrops.Load() != 3 {
|
||||
t.Fatalf("expected 3 signature drops, got %d", store.Stats.SignatureDrops.Load())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigValidation_LogContainsFields(t *testing.T) {
|
||||
// This test verifies the dropped_packets row has all required fields
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
pub, priv, _ := ed25519.GenerateKey(nil)
|
||||
appdata := makeAppdata("LogTestNode")
|
||||
rawHex := buildAdvertHex(pub, priv, 1700000000, appdata)
|
||||
|
||||
// Tamper
|
||||
rawBytes := []byte(rawHex)
|
||||
if rawBytes[76] == '0' {
|
||||
rawBytes[76] = 'f'
|
||||
} else {
|
||||
rawBytes[76] = '0'
|
||||
}
|
||||
|
||||
source := MQTTSource{Name: "test"}
|
||||
msg := newMockMsg("meshcore/US/obs1/packet", `{"raw":"`+string(rawBytes)+`","origin":"MyObserver"}`)
|
||||
cfg := &Config{}
|
||||
|
||||
handleMessage(store, "test", source, msg, nil, cfg)
|
||||
|
||||
var hash, reason, obsID, obsName, pubkey, nodeName string
|
||||
err = store.db.QueryRow("SELECT hash, reason, observer_id, observer_name, node_pubkey, node_name FROM dropped_packets LIMIT 1").
|
||||
Scan(&hash, &reason, &obsID, &obsName, &pubkey, &nodeName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if hash == "" {
|
||||
t.Error("dropped packet should have hash")
|
||||
}
|
||||
if reason != "invalid signature" {
|
||||
t.Errorf("expected reason 'invalid signature', got %q", reason)
|
||||
}
|
||||
if obsID != "obs1" {
|
||||
t.Errorf("expected observer_id 'obs1', got %q", obsID)
|
||||
}
|
||||
if obsName != "MyObserver" {
|
||||
t.Errorf("expected observer_name 'MyObserver', got %q", obsName)
|
||||
}
|
||||
if pubkey == "" {
|
||||
t.Error("dropped packet should have node_pubkey")
|
||||
}
|
||||
if !strings.Contains(nodeName, "LogTestNode") {
|
||||
t.Errorf("expected node_name containing 'LogTestNode', got %q", nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPruneDroppedPackets(t *testing.T) {
|
||||
dbPath := t.TempDir() + "/test.db"
|
||||
store, err := OpenStoreWithInterval(dbPath, 300)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
// Insert an old dropped packet
|
||||
store.db.Exec(`INSERT INTO dropped_packets (hash, reason, dropped_at) VALUES ('old', 'test', datetime('now', '-60 days'))`)
|
||||
store.db.Exec(`INSERT INTO dropped_packets (hash, reason, dropped_at) VALUES ('new', 'test', datetime('now'))`)
|
||||
|
||||
n, err := store.PruneDroppedPackets(30)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatalf("expected 1 pruned, got %d", n)
|
||||
}
|
||||
|
||||
var count int
|
||||
store.db.QueryRow("SELECT COUNT(*) FROM dropped_packets").Scan(&count)
|
||||
if count != 1 {
|
||||
t.Fatalf("expected 1 remaining, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldValidateSignatures_Default(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
if !cfg.ShouldValidateSignatures() {
|
||||
t.Fatal("default should be true")
|
||||
}
|
||||
|
||||
falseVal := false
|
||||
cfg2 := &Config{ValidateSignatures: &falseVal}
|
||||
if cfg2.ShouldValidateSignatures() {
|
||||
t.Fatal("explicit false should be false")
|
||||
}
|
||||
|
||||
trueVal := true
|
||||
cfg3 := &Config{ValidateSignatures: &trueVal}
|
||||
if !cfg3.ShouldValidateSignatures() {
|
||||
t.Fatal("explicit true should be true")
|
||||
}
|
||||
}
|
||||
|
||||
// newMockMsg creates a minimal mqtt.Message for testing.
|
||||
func newMockMsg(topic, payload string) *mockMessage {
|
||||
return &mockMessage{topic: topic, payload: []byte(payload)}
|
||||
}
|
||||
@@ -1,407 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// createTestDB creates a temporary SQLite database with N transmissions (1 obs each).
|
||||
func createTestDB(t *testing.T, numTx int) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "test.db")
|
||||
createTestDBAt(t, dbPath, numTx)
|
||||
return dbPath
|
||||
}
|
||||
|
||||
// loadStore creates a PacketStore from a test DB with given maxMemoryMB.
|
||||
func loadStore(t *testing.T, dbPath string, maxMemMB int) *PacketStore {
|
||||
t.Helper()
|
||||
db, err := OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cfg := &PacketStoreConfig{MaxMemoryMB: maxMemMB}
|
||||
store := NewPacketStore(db, cfg)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
func TestBoundedLoad_LimitedMemory(t *testing.T) {
|
||||
dbPath := createTestDB(t, 5000)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
// Use 1MB budget — should load far fewer than 5000 packets
|
||||
store := loadStore(t, dbPath, 1)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
loaded := len(store.packets)
|
||||
if loaded >= 5000 {
|
||||
t.Errorf("expected bounded load to limit packets, got %d/5000", loaded)
|
||||
}
|
||||
if loaded < 1000 {
|
||||
t.Errorf("expected at least 1000 packets (minimum), got %d", loaded)
|
||||
}
|
||||
t.Logf("Loaded %d/5000 packets with 1MB budget", loaded)
|
||||
}
|
||||
|
||||
func TestBoundedLoad_NewestFirst(t *testing.T) {
|
||||
dbPath := createTestDB(t, 5000)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
store := loadStore(t, dbPath, 1)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
loaded := len(store.packets)
|
||||
if loaded >= 5000 {
|
||||
t.Skip("all packets loaded, can't verify newest-first")
|
||||
}
|
||||
|
||||
// The newest packet in DB has first_seen based on minute 5000.
|
||||
// The loaded packets should be the newest ones.
|
||||
// Last packet in store (sorted ASC) should be the newest in DB.
|
||||
last := store.packets[loaded-1]
|
||||
base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
newestExpected := base.Add(5000 * time.Minute).Format(time.RFC3339)
|
||||
if last.FirstSeen != newestExpected {
|
||||
t.Errorf("expected last packet to be newest (%s), got %s", newestExpected, last.FirstSeen)
|
||||
}
|
||||
|
||||
// First packet should NOT be the oldest in the DB (minute 1)
|
||||
first := store.packets[0]
|
||||
oldestAll := base.Add(1 * time.Minute).Format(time.RFC3339)
|
||||
if first.FirstSeen == oldestAll {
|
||||
t.Errorf("first loaded packet should not be the absolute oldest when bounded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoundedLoad_OldestLoadedSet(t *testing.T) {
|
||||
dbPath := createTestDB(t, 5000)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
store := loadStore(t, dbPath, 1)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
if store.oldestLoaded == "" {
|
||||
t.Fatal("oldestLoaded should be set after bounded load")
|
||||
}
|
||||
if len(store.packets) > 0 && store.oldestLoaded != store.packets[0].FirstSeen {
|
||||
t.Errorf("oldestLoaded (%s) should match first packet (%s)", store.oldestLoaded, store.packets[0].FirstSeen)
|
||||
}
|
||||
t.Logf("oldestLoaded = %s", store.oldestLoaded)
|
||||
}
|
||||
|
||||
func TestBoundedLoad_UnlimitedWithZero(t *testing.T) {
|
||||
dbPath := createTestDB(t, 200)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
store := loadStore(t, dbPath, 0)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
if len(store.packets) != 200 {
|
||||
t.Errorf("expected all 200 packets with maxMemoryMB=0, got %d", len(store.packets))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoundedLoad_AscendingOrder(t *testing.T) {
|
||||
dbPath := createTestDB(t, 3000)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
store := loadStore(t, dbPath, 1)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
// Verify packets are in ascending first_seen order
|
||||
for i := 1; i < len(store.packets); i++ {
|
||||
if store.packets[i].FirstSeen < store.packets[i-1].FirstSeen {
|
||||
t.Fatalf("packets not in ascending order at index %d: %s < %s",
|
||||
i, store.packets[i].FirstSeen, store.packets[i-1].FirstSeen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// loadStoreWithRetention creates a PacketStore with retentionHours set.
|
||||
func loadStoreWithRetention(t *testing.T, dbPath string, retentionHours float64) *PacketStore {
|
||||
t.Helper()
|
||||
db, err := OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cfg := &PacketStoreConfig{RetentionHours: retentionHours}
|
||||
store := NewPacketStore(db, cfg)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// createTestDBWithAgedPackets inserts numRecent packets with timestamps within
|
||||
// the last hour and numOld packets with timestamps 48 hours ago.
|
||||
func createTestDBWithAgedPackets(t *testing.T, numRecent, numOld int) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "test.db")
|
||||
|
||||
conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
execOrFail := func(s string) {
|
||||
if _, err := conn.Exec(s); err != nil {
|
||||
t.Fatalf("setup: %v\nSQL: %s", err, s)
|
||||
}
|
||||
}
|
||||
execOrFail(`CREATE TABLE transmissions (id INTEGER PRIMARY KEY, raw_hex TEXT, hash TEXT, first_seen TEXT, route_type INTEGER, payload_type INTEGER, payload_version INTEGER, decoded_json TEXT)`)
|
||||
execOrFail(`CREATE TABLE observations (id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT, direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT)`)
|
||||
execOrFail(`CREATE TABLE observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
|
||||
execOrFail(`CREATE TABLE nodes (pubkey TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL, last_seen TEXT, first_seen TEXT, frequency REAL)`)
|
||||
execOrFail(`CREATE TABLE schema_version (version INTEGER)`)
|
||||
execOrFail(`INSERT INTO schema_version (version) VALUES (1)`)
|
||||
execOrFail(`CREATE INDEX idx_tx_first_seen ON transmissions(first_seen)`)
|
||||
|
||||
now := time.Now().UTC()
|
||||
id := 1
|
||||
// Insert old packets (48 hours ago)
|
||||
for i := 0; i < numOld; i++ {
|
||||
ts := now.Add(-48 * time.Hour).Add(time.Duration(i) * time.Second).Format(time.RFC3339)
|
||||
conn.Exec("INSERT INTO transmissions VALUES (?,?,?,?,0,4,1,?)", id, "aa", fmt.Sprintf("old%d", i), ts, `{}`)
|
||||
conn.Exec("INSERT INTO observations VALUES (?,?,?,?,?,?,?,?,?,?,?)", id, id, "obs1", "Obs1", "RX", -10.0, -80.0, 5, `[]`, ts, "")
|
||||
id++
|
||||
}
|
||||
// Insert recent packets (within last hour)
|
||||
for i := 0; i < numRecent; i++ {
|
||||
ts := now.Add(-30 * time.Minute).Add(time.Duration(i) * time.Second).Format(time.RFC3339)
|
||||
conn.Exec("INSERT INTO transmissions VALUES (?,?,?,?,0,4,1,?)", id, "bb", fmt.Sprintf("new%d", i), ts, `{}`)
|
||||
conn.Exec("INSERT INTO observations VALUES (?,?,?,?,?,?,?,?,?,?,?)", id, id, "obs1", "Obs1", "RX", -10.0, -80.0, 5, `[]`, ts, "")
|
||||
id++
|
||||
}
|
||||
return dbPath
|
||||
}
|
||||
|
||||
func TestRetentionLoad_OnlyLoadsRecentPackets(t *testing.T) {
|
||||
dbPath := createTestDBWithAgedPackets(t, 50, 100)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
// retention = 2 hours — should load only the 50 recent packets, not the 100 old ones
|
||||
store := loadStoreWithRetention(t, dbPath, 2)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
if len(store.packets) != 50 {
|
||||
t.Errorf("expected 50 recent packets, got %d (old packets should be excluded by retentionHours)", len(store.packets))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetentionLoad_ZeroRetentionLoadsAll(t *testing.T) {
|
||||
dbPath := createTestDBWithAgedPackets(t, 50, 100)
|
||||
defer os.RemoveAll(filepath.Dir(dbPath))
|
||||
|
||||
// retention = 0 (unlimited) — should load all 150 packets
|
||||
store := loadStoreWithRetention(t, dbPath, 0)
|
||||
defer store.db.conn.Close()
|
||||
|
||||
if len(store.packets) != 150 {
|
||||
t.Errorf("expected all 150 packets with retentionHours=0, got %d", len(store.packets))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEstimateStoreTxBytesTypical(t *testing.T) {
|
||||
est := estimateStoreTxBytesTypical(10)
|
||||
if est < 1000 {
|
||||
t.Errorf("typical estimate too low: %d", est)
|
||||
}
|
||||
// Should be roughly proportional to observation count
|
||||
est1 := estimateStoreTxBytesTypical(1)
|
||||
est20 := estimateStoreTxBytesTypical(20)
|
||||
if est20 <= est1 {
|
||||
t.Errorf("estimate should grow with observations: 1obs=%d, 20obs=%d", est1, est20)
|
||||
}
|
||||
t.Logf("Typical estimate: 1obs=%d, 10obs=%d, 20obs=%d bytes", est1, est, est20)
|
||||
}
|
||||
|
||||
func BenchmarkLoad_Bounded(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
dbPath := filepath.Join(dir, "bench.db")
|
||||
createTestDBAt(b, dbPath, 5000)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
db, _ := OpenDB(dbPath)
|
||||
cfg := &PacketStoreConfig{MaxMemoryMB: 1}
|
||||
store := NewPacketStore(db, cfg)
|
||||
store.Load()
|
||||
db.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLoad_Unlimited(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
dbPath := filepath.Join(dir, "bench.db")
|
||||
createTestDBAt(b, dbPath, 5000)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
db, _ := OpenDB(dbPath)
|
||||
cfg := &PacketStoreConfig{MaxMemoryMB: 0}
|
||||
store := NewPacketStore(db, cfg)
|
||||
store.Load()
|
||||
db.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkLoad_30K_Bounded benchmarks bounded Load() with 30K transmissions
|
||||
// and realistic observation counts (1–5 per transmission).
|
||||
func BenchmarkLoad_30K_Bounded(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
dbPath := filepath.Join(dir, "bench30k.db")
|
||||
createTestDBWithObs(b, dbPath, 30000)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
db, _ := OpenDB(dbPath)
|
||||
cfg := &PacketStoreConfig{MaxMemoryMB: 50}
|
||||
store := NewPacketStore(db, cfg)
|
||||
store.Load()
|
||||
db.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkLoad_30K_Unlimited benchmarks unlimited Load() with 30K transmissions
|
||||
// and realistic observation counts (1–5 per transmission).
|
||||
func BenchmarkLoad_30K_Unlimited(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
dbPath := filepath.Join(dir, "bench30k.db")
|
||||
createTestDBWithObs(b, dbPath, 30000)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
db, _ := OpenDB(dbPath)
|
||||
cfg := &PacketStoreConfig{MaxMemoryMB: 0}
|
||||
store := NewPacketStore(db, cfg)
|
||||
store.Load()
|
||||
db.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// createTestDBAt is like createTestDB but writes to a specific path.
|
||||
func createTestDBAt(tb testing.TB, dbPath string, numTx int) {
|
||||
tb.Helper()
|
||||
conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
execOrFail := func(sql string) {
|
||||
if _, err := conn.Exec(sql); err != nil {
|
||||
tb.Fatalf("test DB setup exec failed: %v\nSQL: %s", err, sql)
|
||||
}
|
||||
}
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS transmissions (
|
||||
id INTEGER PRIMARY KEY,
|
||||
raw_hex TEXT, hash TEXT, first_seen TEXT,
|
||||
route_type INTEGER, payload_type INTEGER,
|
||||
payload_version INTEGER, decoded_json TEXT
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS observations (
|
||||
id INTEGER PRIMARY KEY,
|
||||
transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
|
||||
direction TEXT, snr REAL, rssi REAL, score INTEGER,
|
||||
path_json TEXT, timestamp TEXT, raw_hex TEXT
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS nodes (
|
||||
pubkey TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL,
|
||||
last_seen TEXT, first_seen TEXT, frequency REAL
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS schema_version (version INTEGER)`)
|
||||
execOrFail(`INSERT INTO schema_version (version) VALUES (1)`)
|
||||
execOrFail(`CREATE INDEX IF NOT EXISTS idx_tx_first_seen ON transmissions(first_seen)`)
|
||||
|
||||
txStmt, err := conn.Prepare("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
tb.Fatalf("test DB prepare transmissions insert: %v", err)
|
||||
}
|
||||
obsStmt, err := conn.Prepare("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
tb.Fatalf("test DB prepare observations insert: %v", err)
|
||||
}
|
||||
defer txStmt.Close()
|
||||
defer obsStmt.Close()
|
||||
|
||||
base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
for i := 1; i <= numTx; i++ {
|
||||
ts := base.Add(time.Duration(i) * time.Minute).Format(time.RFC3339)
|
||||
hash := fmt.Sprintf("h%04d", i)
|
||||
txStmt.Exec(i, "aabb", hash, ts, 0, 4, 1, fmt.Sprintf(`{"pubKey":"pk%04d"}`, i))
|
||||
obsStmt.Exec(i, i, "obs1", "Obs1", "RX", -10.0, -80.0, 5, `["aa","bb"]`, ts)
|
||||
}
|
||||
}
|
||||
|
||||
// createTestDBWithObs creates a test DB with realistic observation counts (1–5 per tx).
|
||||
func createTestDBWithObs(tb testing.TB, dbPath string, numTx int) {
|
||||
tb.Helper()
|
||||
conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
execOrFail := func(sqlStr string) {
|
||||
if _, err := conn.Exec(sqlStr); err != nil {
|
||||
tb.Fatalf("test DB setup exec failed: %v\nSQL: %s", err, sqlStr)
|
||||
}
|
||||
}
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS transmissions (
|
||||
id INTEGER PRIMARY KEY, raw_hex TEXT, hash TEXT, first_seen TEXT,
|
||||
route_type INTEGER, payload_type INTEGER, payload_version INTEGER, decoded_json TEXT
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS observations (
|
||||
id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
|
||||
direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS nodes (
|
||||
pubkey TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL,
|
||||
last_seen TEXT, first_seen TEXT, frequency REAL
|
||||
)`)
|
||||
execOrFail(`CREATE TABLE IF NOT EXISTS schema_version (version INTEGER)`)
|
||||
execOrFail(`INSERT INTO schema_version (version) VALUES (1)`)
|
||||
execOrFail(`CREATE INDEX IF NOT EXISTS idx_tx_first_seen ON transmissions(first_seen)`)
|
||||
|
||||
txStmt, err := conn.Prepare("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
tb.Fatalf("test DB prepare transmissions: %v", err)
|
||||
}
|
||||
obsStmt, err := conn.Prepare("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
tb.Fatalf("test DB prepare observations: %v", err)
|
||||
}
|
||||
defer txStmt.Close()
|
||||
defer obsStmt.Close()
|
||||
|
||||
observers := []string{"obs1", "obs2", "obs3", "obs4", "obs5"}
|
||||
obsNames := []string{"Alpha", "Bravo", "Charlie", "Delta", "Echo"}
|
||||
obsID := 1
|
||||
base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
for i := 1; i <= numTx; i++ {
|
||||
ts := base.Add(time.Duration(i) * time.Minute).Format(time.RFC3339)
|
||||
hash := fmt.Sprintf("h%06d", i)
|
||||
txStmt.Exec(i, "aabb", hash, ts, 0, 4, 1, fmt.Sprintf(`{"pubKey":"pk%06d"}`, i))
|
||||
nObs := (i % 5) + 1 // 1–5 observations per transmission
|
||||
for j := 0; j < nObs; j++ {
|
||||
snr := -5.0 + float64(j)*2.5
|
||||
rssi := -90.0 + float64(j)*5.0
|
||||
obsStmt.Exec(obsID, i, observers[j], obsNames[j], "RX", snr, rssi, 5-j, `["aa","bb"]`, ts)
|
||||
obsID++
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,15 +9,14 @@ import (
|
||||
func newTestStore(t *testing.T) *PacketStore {
|
||||
t.Helper()
|
||||
return &PacketStore{
|
||||
rfCache: make(map[string]*cachedResult),
|
||||
topoCache: make(map[string]*cachedResult),
|
||||
hashCache: make(map[string]*cachedResult),
|
||||
collisionCache: make(map[string]*cachedResult),
|
||||
chanCache: make(map[string]*cachedResult),
|
||||
distCache: make(map[string]*cachedResult),
|
||||
subpathCache: make(map[string]*cachedResult),
|
||||
rfCacheTTL: 15 * time.Second,
|
||||
invCooldown: 10 * time.Second,
|
||||
rfCache: make(map[string]*cachedResult),
|
||||
topoCache: make(map[string]*cachedResult),
|
||||
hashCache: make(map[string]*cachedResult),
|
||||
chanCache: make(map[string]*cachedResult),
|
||||
distCache: make(map[string]*cachedResult),
|
||||
subpathCache: make(map[string]*cachedResult),
|
||||
rfCacheTTL: 15 * time.Second,
|
||||
invCooldown: 10 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,7 +29,6 @@ func populateAllCaches(s *PacketStore) {
|
||||
s.rfCache["global"] = dummy
|
||||
s.topoCache["global"] = dummy
|
||||
s.hashCache["global"] = dummy
|
||||
s.collisionCache["global"] = dummy
|
||||
s.chanCache["global"] = dummy
|
||||
s.distCache["global"] = dummy
|
||||
s.subpathCache["global"] = dummy
|
||||
@@ -41,13 +39,12 @@ func cachePopulated(s *PacketStore) map[string]bool {
|
||||
s.cacheMu.Lock()
|
||||
defer s.cacheMu.Unlock()
|
||||
return map[string]bool{
|
||||
"rf": len(s.rfCache) > 0,
|
||||
"topo": len(s.topoCache) > 0,
|
||||
"hash": len(s.hashCache) > 0,
|
||||
"collision": len(s.collisionCache) > 0,
|
||||
"chan": len(s.chanCache) > 0,
|
||||
"dist": len(s.distCache) > 0,
|
||||
"subpath": len(s.subpathCache) > 0,
|
||||
"rf": len(s.rfCache) > 0,
|
||||
"topo": len(s.topoCache) > 0,
|
||||
"hash": len(s.hashCache) > 0,
|
||||
"chan": len(s.chanCache) > 0,
|
||||
"dist": len(s.distCache) > 0,
|
||||
"subpath": len(s.subpathCache) > 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,8 +90,7 @@ func TestInvalidateCachesFor_NewTransmissionsOnly(t *testing.T) {
|
||||
if pop["hash"] {
|
||||
t.Error("hash cache should be cleared on new transmissions")
|
||||
}
|
||||
// collisionCache should NOT be cleared by transmissions alone (only by hasNewNodes)
|
||||
for _, name := range []string{"rf", "topo", "collision", "chan", "dist", "subpath"} {
|
||||
for _, name := range []string{"rf", "topo", "chan", "dist", "subpath"} {
|
||||
if !pop[name] {
|
||||
t.Errorf("%s cache should NOT be cleared on transmission-only ingest", name)
|
||||
}
|
||||
@@ -335,180 +331,3 @@ func BenchmarkCacheHitDuringIngestion(b *testing.B) {
|
||||
}
|
||||
b.ReportMetric(float64(hits)/float64(hits+misses)*100, "hit%")
|
||||
}
|
||||
|
||||
// TestInvCooldownFromConfig verifies that invalidationDebounce from config
|
||||
// is wired to invCooldown on PacketStore.
|
||||
func TestInvCooldownFromConfig(t *testing.T) {
|
||||
// Default without config
|
||||
ps := NewPacketStore(nil, nil)
|
||||
if ps.invCooldown != 300*time.Second {
|
||||
t.Errorf("default invCooldown = %v, want 300s", ps.invCooldown)
|
||||
}
|
||||
|
||||
// With config override
|
||||
ct := map[string]interface{}{"invalidationDebounce": float64(60)}
|
||||
ps2 := NewPacketStore(nil, nil, ct)
|
||||
if ps2.invCooldown != 60*time.Second {
|
||||
t.Errorf("configured invCooldown = %v, want 60s", ps2.invCooldown)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCollisionCacheNotClearedByTransmissions verifies that collisionCache
|
||||
// is only cleared by hasNewNodes, not hasNewTransmissions (fixes #720).
|
||||
func TestCollisionCacheNotClearedByTransmissions(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
populateAllCaches(s)
|
||||
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewTransmissions: true})
|
||||
|
||||
pop := cachePopulated(s)
|
||||
if !pop["collision"] {
|
||||
t.Error("collisionCache should NOT be cleared by hasNewTransmissions alone")
|
||||
}
|
||||
if pop["hash"] {
|
||||
t.Error("hashCache should be cleared by hasNewTransmissions")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCollisionCacheClearedByNewNodes verifies that collisionCache IS cleared
|
||||
// when genuinely new nodes are discovered.
|
||||
func TestCollisionCacheClearedByNewNodes(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
populateAllCaches(s)
|
||||
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewNodes: true})
|
||||
|
||||
pop := cachePopulated(s)
|
||||
if pop["collision"] {
|
||||
t.Error("collisionCache should be cleared by hasNewNodes")
|
||||
}
|
||||
// Other caches should survive
|
||||
for _, name := range []string{"rf", "topo", "hash", "chan", "dist", "subpath"} {
|
||||
if !pop[name] {
|
||||
t.Errorf("%s cache should NOT be cleared on new-nodes-only ingest", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCacheSurvivesMultipleIngestCyclesWithinCooldown verifies that caches
|
||||
// survive repeated ingest cycles during the cooldown period.
|
||||
func TestCacheSurvivesMultipleIngestCyclesWithinCooldown(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
s.invCooldown = 200 * time.Millisecond
|
||||
|
||||
// First invalidation goes through (starts cooldown)
|
||||
populateAllCaches(s)
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
|
||||
pop := cachePopulated(s)
|
||||
if pop["rf"] {
|
||||
t.Error("rf should be cleared on first invalidation")
|
||||
}
|
||||
|
||||
// Repopulate and simulate 5 rapid ingest cycles
|
||||
populateAllCaches(s)
|
||||
for i := 0; i < 5; i++ {
|
||||
s.invalidateCachesFor(cacheInvalidation{
|
||||
hasNewObservations: true,
|
||||
hasNewTransmissions: true,
|
||||
hasNewPaths: true,
|
||||
})
|
||||
}
|
||||
|
||||
// All caches should survive during cooldown
|
||||
pop = cachePopulated(s)
|
||||
for name, has := range pop {
|
||||
if !has {
|
||||
t.Errorf("%s cache should survive during cooldown period (ingest cycle %d)", name, 5)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewNodesAccumulatedDuringCooldown verifies that hasNewNodes flags
|
||||
// accumulated during cooldown are applied when cooldown expires.
|
||||
func TestNewNodesAccumulatedDuringCooldown(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
s.invCooldown = 100 * time.Millisecond
|
||||
|
||||
// First call starts cooldown
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
|
||||
|
||||
// During cooldown, accumulate hasNewNodes
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewNodes: true})
|
||||
|
||||
// Verify accumulated
|
||||
s.cacheMu.Lock()
|
||||
if s.pendingInv == nil || !s.pendingInv.hasNewNodes {
|
||||
t.Error("hasNewNodes should be accumulated in pendingInv")
|
||||
}
|
||||
s.cacheMu.Unlock()
|
||||
|
||||
// Wait for cooldown
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
|
||||
// Trigger flush
|
||||
populateAllCaches(s)
|
||||
s.invalidateCachesFor(cacheInvalidation{})
|
||||
|
||||
pop := cachePopulated(s)
|
||||
if pop["collision"] {
|
||||
t.Error("collisionCache should be cleared after pending hasNewNodes is flushed")
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkAnalyticsLatencyCacheHitVsMiss benchmarks cache hit vs miss
|
||||
// for analytics endpoints to demonstrate the performance impact.
|
||||
func BenchmarkAnalyticsLatencyCacheHitVsMiss(b *testing.B) {
|
||||
s := &PacketStore{
|
||||
rfCache: make(map[string]*cachedResult),
|
||||
topoCache: make(map[string]*cachedResult),
|
||||
hashCache: make(map[string]*cachedResult),
|
||||
collisionCache: make(map[string]*cachedResult),
|
||||
chanCache: make(map[string]*cachedResult),
|
||||
distCache: make(map[string]*cachedResult),
|
||||
subpathCache: make(map[string]*cachedResult),
|
||||
rfCacheTTL: 1800 * time.Second,
|
||||
invCooldown: 300 * time.Second,
|
||||
}
|
||||
|
||||
// Pre-populate cache
|
||||
s.cacheMu.Lock()
|
||||
s.rfCache["global"] = &cachedResult{
|
||||
data: map[string]interface{}{"bins": make([]int, 100)},
|
||||
expiresAt: time.Now().Add(time.Hour),
|
||||
}
|
||||
s.cacheMu.Unlock()
|
||||
|
||||
// Trigger initial invalidation to start cooldown
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
|
||||
|
||||
var hits, misses int64
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Re-populate (simulates query filling cache)
|
||||
s.cacheMu.Lock()
|
||||
if len(s.rfCache) == 0 {
|
||||
s.rfCache["global"] = &cachedResult{
|
||||
data: map[string]interface{}{"bins": make([]int, 100)},
|
||||
expiresAt: time.Now().Add(time.Hour),
|
||||
}
|
||||
}
|
||||
s.cacheMu.Unlock()
|
||||
|
||||
// Simulate ingest (rate-limited)
|
||||
s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
|
||||
|
||||
// Check hit
|
||||
s.cacheMu.Lock()
|
||||
if len(s.rfCache) > 0 {
|
||||
hits++
|
||||
} else {
|
||||
misses++
|
||||
}
|
||||
s.cacheMu.Unlock()
|
||||
}
|
||||
|
||||
hitRate := float64(hits) / float64(hits+misses) * 100
|
||||
b.ReportMetric(hitRate, "hit%")
|
||||
if hitRate < 50 {
|
||||
b.Errorf("hit rate %.1f%% is below 50%% target", hitRate)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestPacketsChannelFilter verifies /api/packets?channel=... actually filters
|
||||
// (regression test for #812).
|
||||
func TestPacketsChannelFilter(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
|
||||
get := func(url string) map[string]interface{} {
|
||||
req := httptest.NewRequest("GET", url, nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("GET %s: expected 200, got %d", url, w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatalf("decode %s: %v", url, err)
|
||||
}
|
||||
return body
|
||||
}
|
||||
|
||||
all := get("/api/packets?limit=50")
|
||||
allTotal := int(all["total"].(float64))
|
||||
if allTotal < 2 {
|
||||
t.Fatalf("expected baseline >= 2 packets, got %d", allTotal)
|
||||
}
|
||||
|
||||
test := get("/api/packets?limit=50&channel=%23test")
|
||||
testTotal := int(test["total"].(float64))
|
||||
if testTotal == 0 {
|
||||
t.Fatalf("channel=#test: expected >= 1 match, got 0 (filter ignored?)")
|
||||
}
|
||||
if testTotal >= allTotal {
|
||||
t.Fatalf("channel=#test: expected fewer packets than baseline (%d), got %d", allTotal, testTotal)
|
||||
}
|
||||
|
||||
// Every returned packet must be a CHAN/GRP_TXT (payload_type=5) on #test.
|
||||
pkts, _ := test["packets"].([]interface{})
|
||||
for _, p := range pkts {
|
||||
m := p.(map[string]interface{})
|
||||
if pt, _ := m["payload_type"].(float64); int(pt) != 5 {
|
||||
t.Errorf("channel=#test: returned non-GRP_TXT packet (payload_type=%v)", m["payload_type"])
|
||||
}
|
||||
}
|
||||
|
||||
none := get("/api/packets?limit=50&channel=nonexistentchannel")
|
||||
if int(none["total"].(float64)) != 0 {
|
||||
t.Fatalf("channel=nonexistentchannel: expected total=0, got %v", none["total"])
|
||||
}
|
||||
}
|
||||
@@ -1,748 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ── Clock Skew Severity ────────────────────────────────────────────────────────
|
||||
|
||||
type SkewSeverity string
|
||||
|
||||
const (
|
||||
SkewOK SkewSeverity = "ok" // < 5 min
|
||||
SkewWarning SkewSeverity = "warning" // 5 min – 1 hour
|
||||
SkewCritical SkewSeverity = "critical" // 1 hour – 30 days
|
||||
SkewAbsurd SkewSeverity = "absurd" // > 30 days
|
||||
SkewNoClock SkewSeverity = "no_clock" // > 365 days — uninitialized RTC
|
||||
SkewBimodalClock SkewSeverity = "bimodal_clock" // mixed good+bad recent samples (flaky RTC)
|
||||
)
|
||||
|
||||
// Default thresholds in seconds.
|
||||
const (
|
||||
skewThresholdWarnSec = 5 * 60 // 5 minutes
|
||||
skewThresholdCriticalSec = 60 * 60 // 1 hour
|
||||
skewThresholdAbsurdSec = 30 * 24 * 3600 // 30 days
|
||||
skewThresholdNoClockSec = 365 * 24 * 3600 // 365 days — uninitialized RTC
|
||||
|
||||
// minDriftSamples is the minimum number of advert transmissions needed
|
||||
// to compute a meaningful linear drift rate.
|
||||
minDriftSamples = 5
|
||||
|
||||
// maxReasonableDriftPerDay caps drift display. Physically impossible
|
||||
// drift rates (> 1 day/day) indicate insufficient or outlier samples.
|
||||
maxReasonableDriftPerDay = 86400.0
|
||||
|
||||
// recentSkewWindowCount is the number of most-recent advert samples
|
||||
// used to derive the "current" skew for severity classification (see
|
||||
// issue #789). The all-time median is poisoned by historical bad
|
||||
// samples (e.g. a node that was off and then GPS-corrected); severity
|
||||
// must reflect current health, not lifetime statistics.
|
||||
recentSkewWindowCount = 5
|
||||
|
||||
// recentSkewWindowSec bounds the recent-window in time as well: only
|
||||
// samples from the last N seconds count as "recent" for severity.
|
||||
// The effective window is min(recentSkewWindowCount, samples in 1h).
|
||||
recentSkewWindowSec = 3600
|
||||
|
||||
// bimodalSkewThresholdSec is the absolute skew threshold (1 hour)
|
||||
// above which a sample is considered "bad" — likely firmware emitting
|
||||
// a nonsense timestamp from an uninitialized RTC, not real drift.
|
||||
// Chosen to match the warning/critical severity boundary: real clock
|
||||
// drift rarely exceeds 1 hour, while epoch-0 RTCs produce ~1.7B sec.
|
||||
bimodalSkewThresholdSec = 3600.0
|
||||
|
||||
// maxPlausibleSkewJumpSec is the largest skew change between
|
||||
// consecutive samples that we treat as physical drift. Anything larger
|
||||
// (e.g. a GPS sync that jumps the clock by minutes/days) is rejected
|
||||
// as an outlier when computing drift. Real microcontroller drift is
|
||||
// fractions of a second per advert; 60s is a generous safety factor.
|
||||
maxPlausibleSkewJumpSec = 60.0
|
||||
|
||||
// theilSenMaxPoints caps the number of points fed to Theil-Sen
|
||||
// regression (O(n²) in pairs). For nodes with thousands of samples we
|
||||
// keep the most-recent points, which are also the most relevant for
|
||||
// current drift.
|
||||
theilSenMaxPoints = 200
|
||||
)
|
||||
|
||||
// classifySkew maps absolute skew (seconds) to a severity level.
|
||||
// Float64 comparison is safe: inputs are rounded to 1 decimal via round(),
|
||||
// and thresholds are integer multiples of 60 — no rounding artifacts.
|
||||
func classifySkew(absSkewSec float64) SkewSeverity {
|
||||
switch {
|
||||
case absSkewSec >= skewThresholdNoClockSec:
|
||||
return SkewNoClock
|
||||
case absSkewSec >= skewThresholdAbsurdSec:
|
||||
return SkewAbsurd
|
||||
case absSkewSec >= skewThresholdCriticalSec:
|
||||
return SkewCritical
|
||||
case absSkewSec >= skewThresholdWarnSec:
|
||||
return SkewWarning
|
||||
default:
|
||||
return SkewOK
|
||||
}
|
||||
}
|
||||
|
||||
// ── Data Types ─────────────────────────────────────────────────────────────────
|
||||
|
||||
// skewSample is a single raw skew measurement from one advert observation.
|
||||
type skewSample struct {
|
||||
advertTS int64 // node's advert Unix timestamp
|
||||
observedTS int64 // observation Unix timestamp
|
||||
observerID string // which observer saw this
|
||||
hash string // transmission hash (for multi-observer grouping)
|
||||
}
|
||||
|
||||
// ObserverCalibration holds the computed clock offset for an observer.
|
||||
type ObserverCalibration struct {
|
||||
ObserverID string `json:"observerID"`
|
||||
OffsetSec float64 `json:"offsetSec"` // positive = observer clock ahead
|
||||
Samples int `json:"samples"` // number of multi-observer packets used
|
||||
}
|
||||
|
||||
// NodeClockSkew is the API response for a single node's clock skew data.
|
||||
type NodeClockSkew struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
MeanSkewSec float64 `json:"meanSkewSec"` // corrected mean skew (positive = node ahead)
|
||||
MedianSkewSec float64 `json:"medianSkewSec"` // corrected median skew
|
||||
LastSkewSec float64 `json:"lastSkewSec"` // most recent corrected skew
|
||||
RecentMedianSkewSec float64 `json:"recentMedianSkewSec"` // median across most-recent samples (drives severity, see #789)
|
||||
DriftPerDaySec float64 `json:"driftPerDaySec"` // linear drift rate (sec/day)
|
||||
Severity SkewSeverity `json:"severity"`
|
||||
SampleCount int `json:"sampleCount"`
|
||||
Calibrated bool `json:"calibrated"` // true if observer calibration was applied
|
||||
LastAdvertTS int64 `json:"lastAdvertTS"` // most recent advert timestamp
|
||||
LastObservedTS int64 `json:"lastObservedTS"` // most recent observation timestamp
|
||||
Samples []SkewSample `json:"samples,omitempty"` // time-series for sparklines
|
||||
GoodFraction float64 `json:"goodFraction"` // fraction of recent samples with |skew| <= 1h
|
||||
RecentBadSampleCount int `json:"recentBadSampleCount"` // count of recent samples with |skew| > 1h
|
||||
RecentSampleCount int `json:"recentSampleCount"` // total recent samples in window
|
||||
NodeName string `json:"nodeName,omitempty"` // populated in fleet responses
|
||||
NodeRole string `json:"nodeRole,omitempty"` // populated in fleet responses
|
||||
}
|
||||
|
||||
// SkewSample is a single (timestamp, skew) point for sparkline rendering.
type SkewSample struct {
	Timestamp int64   `json:"ts"`   // Unix epoch of observation
	SkewSec   float64 `json:"skew"` // corrected skew in seconds
}
|
||||
|
||||
// txSkewResult maps tx hash → per-transmission skew stats. This is an
// intermediate result keyed by hash (not pubkey); the store maps hash → pubkey
// when building the final per-node view. Declared as a type alias so values
// interconvert freely with the underlying map type.
type txSkewResult = map[string]*NodeClockSkew
|
||||
|
||||
// ── Clock Skew Engine ──────────────────────────────────────────────────────────
|
||||
|
||||
// ClockSkewEngine computes and caches clock skew data for nodes and observers.
// All fields below mu are guarded by it; Recompute refreshes them at most
// once per computeInterval.
type ClockSkewEngine struct {
	mu              sync.RWMutex
	observerOffsets map[string]float64 // observerID → calibrated offset (seconds)
	observerSamples map[string]int     // observerID → number of multi-observer packets used
	nodeSkew        txSkewResult       // tx hash → per-transmission skew stats
	lastComputed    time.Time          // when the cached results were last swapped in
	computeInterval time.Duration      // minimum time between recomputes
}
|
||||
|
||||
func NewClockSkewEngine() *ClockSkewEngine {
|
||||
return &ClockSkewEngine{
|
||||
observerOffsets: make(map[string]float64),
|
||||
observerSamples: make(map[string]int),
|
||||
nodeSkew: make(txSkewResult),
|
||||
computeInterval: 30 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// Recompute recalculates all clock skew data from the packet store.
// Called periodically or on demand. Holds store RLock externally.
// Uses read-copy-update: heavy computation runs outside the write lock,
// then results are swapped in under a brief lock. It is a no-op when the
// cached results are younger than computeInterval.
func (e *ClockSkewEngine) Recompute(store *PacketStore) {
	// Fast path: check under read lock if recompute is needed.
	e.mu.RLock()
	fresh := time.Since(e.lastComputed) < e.computeInterval
	e.mu.RUnlock()
	if fresh {
		return
	}

	// Phase 1: Collect skew samples from ADVERT packets (store RLock held by caller).
	samples := collectSamples(store)

	// Phase 2–3: Compute outside the write lock.
	var newOffsets map[string]float64
	var newSamples map[string]int
	var newNodeSkew txSkewResult

	if len(samples) > 0 {
		newOffsets, newSamples = calibrateObservers(samples)
		newNodeSkew = computeNodeSkew(samples, newOffsets)
	} else {
		// No usable samples: publish empty (non-nil) maps so readers
		// never see stale data or nil maps.
		newOffsets = make(map[string]float64)
		newSamples = make(map[string]int)
		newNodeSkew = make(txSkewResult)
	}

	// Swap results under brief write lock.
	e.mu.Lock()
	// Re-check: another goroutine may have computed while we were working.
	if time.Since(e.lastComputed) < e.computeInterval {
		e.mu.Unlock()
		return
	}
	e.observerOffsets = newOffsets
	e.observerSamples = newSamples
	e.nodeSkew = newNodeSkew
	e.lastComputed = time.Now()
	e.mu.Unlock()
}
|
||||
|
||||
// collectSamples extracts skew samples from ADVERT packets in the store.
|
||||
// Must be called with store.mu held (at least RLock).
|
||||
func collectSamples(store *PacketStore) []skewSample {
|
||||
adverts := store.byPayloadType[PayloadADVERT]
|
||||
if len(adverts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
samples := make([]skewSample, 0, len(adverts)*2)
|
||||
for _, tx := range adverts {
|
||||
decoded := tx.ParsedDecoded()
|
||||
if decoded == nil {
|
||||
continue
|
||||
}
|
||||
// Extract advert timestamp from decoded JSON.
|
||||
advertTS := extractTimestamp(decoded)
|
||||
if advertTS <= 0 {
|
||||
continue
|
||||
}
|
||||
// Sanity: skip timestamps before year 2020 or after year 2100.
|
||||
if advertTS < 1577836800 || advertTS > 4102444800 {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, obs := range tx.Observations {
|
||||
obsTS := parseISO(obs.Timestamp)
|
||||
if obsTS <= 0 {
|
||||
continue
|
||||
}
|
||||
samples = append(samples, skewSample{
|
||||
advertTS: advertTS,
|
||||
observedTS: obsTS,
|
||||
observerID: obs.ObserverID,
|
||||
hash: tx.Hash,
|
||||
})
|
||||
}
|
||||
}
|
||||
return samples
|
||||
}
|
||||
|
||||
// extractTimestamp gets the Unix timestamp from a decoded ADVERT payload.
|
||||
func extractTimestamp(decoded map[string]interface{}) int64 {
|
||||
// Try payload.timestamp first (nested in "payload" key).
|
||||
if payload, ok := decoded["payload"]; ok {
|
||||
if pm, ok := payload.(map[string]interface{}); ok {
|
||||
if ts := jsonNumber(pm, "timestamp"); ts > 0 {
|
||||
return ts
|
||||
}
|
||||
}
|
||||
}
|
||||
// Fallback: top-level timestamp.
|
||||
if ts := jsonNumber(decoded, "timestamp"); ts > 0 {
|
||||
return ts
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// jsonNumber extracts an int64 from a JSON-parsed map, returning 0 when the
// key is absent, nil, or not numeric. encoding/json decodes numbers as
// float64 by default and as json.Number when Decoder.UseNumber is enabled;
// both are handled (the original comment promised json.Number support but
// the switch lacked a case for it), along with plain Go ints from maps
// built in tests.
func jsonNumber(m map[string]interface{}, key string) int64 {
	v, ok := m[key]
	if !ok || v == nil {
		return 0
	}
	switch n := v.(type) {
	case float64:
		return int64(n)
	case int64:
		return n
	case int:
		return int64(n)
	case interface{ Int64() (int64, error) }:
		// json.Number, matched structurally to avoid importing encoding/json.
		if i, err := n.Int64(); err == nil {
			return i
		}
	}
	return 0
}
|
||||
|
||||
// parseISO parses an ISO 8601 / RFC 3339 timestamp string to Unix seconds.
// Returns 0 for empty or unparseable input.
func parseISO(s string) int64 {
	if s == "" {
		return 0
	}
	layouts := []string{
		time.RFC3339,
		"2006-01-02T15:04:05.999999999Z07:00", // explicit fractional-second variant
	}
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t.Unix()
		}
	}
	return 0
}
|
||||
|
||||
// ── Phase 2: Observer Calibration ──────────────────────────────────────────────
|
||||
|
||||
// calibrateObservers computes each observer's clock offset using multi-observer
|
||||
// packets. Returns offset map and sample count map.
|
||||
func calibrateObservers(samples []skewSample) (map[string]float64, map[string]int) {
|
||||
// Group observations by packet hash.
|
||||
byHash := make(map[string][]skewSample)
|
||||
for _, s := range samples {
|
||||
byHash[s.hash] = append(byHash[s.hash], s)
|
||||
}
|
||||
|
||||
// For each multi-observer packet, compute per-observer deviation from median.
|
||||
deviations := make(map[string][]float64) // observerID → list of deviations
|
||||
for _, group := range byHash {
|
||||
if len(group) < 2 {
|
||||
continue // single-observer packet, can't calibrate
|
||||
}
|
||||
// Compute median observation timestamp for this packet.
|
||||
obsTimes := make([]float64, len(group))
|
||||
for i, s := range group {
|
||||
obsTimes[i] = float64(s.observedTS)
|
||||
}
|
||||
medianObs := median(obsTimes)
|
||||
for _, s := range group {
|
||||
dev := float64(s.observedTS) - medianObs
|
||||
deviations[s.observerID] = append(deviations[s.observerID], dev)
|
||||
}
|
||||
}
|
||||
|
||||
// Each observer's offset = median of its deviations.
|
||||
offsets := make(map[string]float64, len(deviations))
|
||||
counts := make(map[string]int, len(deviations))
|
||||
for obsID, devs := range deviations {
|
||||
offsets[obsID] = median(devs)
|
||||
counts[obsID] = len(devs)
|
||||
}
|
||||
return offsets, counts
|
||||
}
|
||||
|
||||
// ── Phase 3: Per-Node Skew ─────────────────────────────────────────────────────
|
||||
|
||||
// computeNodeSkew calculates corrected skew statistics for each node.
|
||||
func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) txSkewResult {
|
||||
// Compute corrected skew per sample, grouped by hash (each hash = one
|
||||
// node's advert transmission). The caller maps hash → pubkey via byNode.
|
||||
type correctedSample struct {
|
||||
skew float64
|
||||
observedTS int64
|
||||
calibrated bool
|
||||
}
|
||||
|
||||
byHash := make(map[string][]correctedSample)
|
||||
hashAdvertTS := make(map[string]int64)
|
||||
|
||||
for _, s := range samples {
|
||||
obsOffset, hasCal := obsOffsets[s.observerID]
|
||||
rawSkew := float64(s.advertTS - s.observedTS)
|
||||
corrected := rawSkew
|
||||
if hasCal {
|
||||
// Observer offset = obs_ts - median(all_obs_ts). If observer is ahead,
|
||||
// its obs_ts is inflated, making raw_skew too low. Add offset to correct.
|
||||
corrected = rawSkew + obsOffset
|
||||
}
|
||||
byHash[s.hash] = append(byHash[s.hash], correctedSample{
|
||||
skew: corrected,
|
||||
observedTS: s.observedTS,
|
||||
calibrated: hasCal,
|
||||
})
|
||||
hashAdvertTS[s.hash] = s.advertTS
|
||||
}
|
||||
|
||||
// Each hash represents one advert from one node. Compute median corrected
|
||||
// skew per hash (across multiple observers).
|
||||
|
||||
result := make(map[string]*NodeClockSkew) // keyed by hash for now
|
||||
for hash, cs := range byHash {
|
||||
skews := make([]float64, len(cs))
|
||||
for i, c := range cs {
|
||||
skews[i] = c.skew
|
||||
}
|
||||
medSkew := median(skews)
|
||||
meanSkew := mean(skews)
|
||||
|
||||
// Find latest observation.
|
||||
var latestObsTS int64
|
||||
var anyCal bool
|
||||
for _, c := range cs {
|
||||
if c.observedTS > latestObsTS {
|
||||
latestObsTS = c.observedTS
|
||||
}
|
||||
if c.calibrated {
|
||||
anyCal = true
|
||||
}
|
||||
}
|
||||
|
||||
absMedian := math.Abs(medSkew)
|
||||
result[hash] = &NodeClockSkew{
|
||||
MeanSkewSec: round(meanSkew, 1),
|
||||
MedianSkewSec: round(medSkew, 1),
|
||||
LastSkewSec: round(cs[len(cs)-1].skew, 1),
|
||||
Severity: classifySkew(absMedian),
|
||||
SampleCount: len(cs),
|
||||
Calibrated: anyCal,
|
||||
LastAdvertTS: hashAdvertTS[hash],
|
||||
LastObservedTS: latestObsTS,
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ── Integration with PacketStore ───────────────────────────────────────────────
|
||||
|
||||
// GetNodeClockSkew returns the clock skew data for a specific node, or nil
// when no skew data exists for it. Acquires the store read lock; must NOT
// be called with s.mu already held (use getNodeClockSkewLocked for that).
func (s *PacketStore) GetNodeClockSkew(pubkey string) *NodeClockSkew {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.getNodeClockSkewLocked(pubkey)
}
|
||||
|
||||
// getNodeClockSkewLocked returns clock skew for a node, aggregating the
// per-transmission stats in the engine cache across all of the node's
// ADVERT transmissions. Returns nil when the node has no usable skew data.
// Must be called with s.mu held (at least RLock).
func (s *PacketStore) getNodeClockSkewLocked(pubkey string) *NodeClockSkew {
	// Refresh the engine cache if stale (no-op when fresh).
	s.clockSkew.Recompute(s)

	txs := s.byNode[pubkey]
	if len(txs) == 0 {
		return nil
	}

	s.clockSkew.mu.RLock()
	defer s.clockSkew.mu.RUnlock()

	var allSkews []float64
	var lastSkew float64
	var lastObsTS, lastAdvTS int64
	var totalSamples int
	var anyCal bool
	var tsSkews []tsSkewPair

	// Gather per-transmission stats for this node's ADVERTs.
	for _, tx := range txs {
		if tx.PayloadType == nil || *tx.PayloadType != PayloadADVERT {
			continue
		}
		cs, ok := s.clockSkew.nodeSkew[tx.Hash]
		if !ok {
			continue
		}
		allSkews = append(allSkews, cs.MedianSkewSec)
		totalSamples += cs.SampleCount
		if cs.Calibrated {
			anyCal = true
		}
		// Track the most recently observed transmission.
		if cs.LastObservedTS > lastObsTS {
			lastObsTS = cs.LastObservedTS
			lastSkew = cs.LastSkewSec
			lastAdvTS = cs.LastAdvertTS
		}
		tsSkews = append(tsSkews, tsSkewPair{ts: cs.LastObservedTS, skew: cs.MedianSkewSec})
	}

	if len(allSkews) == 0 {
		return nil
	}

	medSkew := median(allSkews)
	meanSkew := mean(allSkews)

	// Severity is derived from RECENT samples only (issue #789). The
	// all-time median is poisoned by historical bad data — a node that
	// was off for hours and then GPS-corrected can have median = -59M sec
	// while its current skew is -0.8s. Operators need severity to reflect
	// current health, so they trust the dashboard.
	//
	// Sort tsSkews by time and take the last recentSkewWindowCount samples
	// (or all samples within recentSkewWindowSec of the latest, whichever
	// gives FEWER samples — we want the more-current view; a chatty node
	// can fit dozens of samples in 1h, in which case the count cap wins).
	sort.Slice(tsSkews, func(i, j int) bool { return tsSkews[i].ts < tsSkews[j].ts })

	recentSkew := lastSkew
	var recentVals []float64
	if n := len(tsSkews); n > 0 {
		latestTS := tsSkews[n-1].ts
		// Index-based window: last K samples.
		startByCount := n - recentSkewWindowCount
		if startByCount < 0 {
			startByCount = 0
		}
		// Time-based window: samples newer than latestTS - windowSec.
		startByTime := n - 1
		for i := n - 1; i >= 0; i-- {
			if latestTS-tsSkews[i].ts <= recentSkewWindowSec {
				startByTime = i
			} else {
				break
			}
		}
		// Pick the narrower (larger-index) of the two windows — the most
		// current view of the node's clock health.
		start := startByCount
		if startByTime > start {
			start = startByTime
		}
		recentVals = make([]float64, 0, n-start)
		for i := start; i < n; i++ {
			recentVals = append(recentVals, tsSkews[i].skew)
		}
		if len(recentVals) > 0 {
			recentSkew = median(recentVals)
		}
	}

	// ── Bimodal detection (#845) ─────────────────────────────────────────
	// Split recent samples into "good" (|skew| <= 1h, real clock) and
	// "bad" (|skew| > 1h, firmware nonsense from uninitialized RTC).
	// Classification order (first match wins):
	//   no_clock      — goodFraction < 0.10 (essentially no real clock)
	//   bimodal_clock — 0.10 <= goodFraction < 0.80 AND badCount > 0
	//   ok/warn/etc.  — goodFraction >= 0.80 (normal, outliers filtered)
	var goodSamples []float64
	for _, v := range recentVals {
		if math.Abs(v) <= bimodalSkewThresholdSec {
			goodSamples = append(goodSamples, v)
		}
	}
	recentSampleCount := len(recentVals)
	recentBadCount := recentSampleCount - len(goodSamples)
	var goodFraction float64
	if recentSampleCount > 0 {
		goodFraction = float64(len(goodSamples)) / float64(recentSampleCount)
	}

	var severity SkewSeverity
	if goodFraction < 0.10 {
		// Essentially no real clock — classify as no_clock regardless
		// of the raw skew magnitude.
		severity = SkewNoClock
	} else if goodFraction < 0.80 && recentBadCount > 0 {
		// Bimodal: use median of GOOD samples as the "real" skew.
		severity = SkewBimodalClock
		if len(goodSamples) > 0 {
			recentSkew = median(goodSamples)
		}
	} else {
		// Normal path: if there are good samples, use their median
		// (filters out rare outliers in ≥80% good case).
		if len(goodSamples) > 0 && recentBadCount > 0 {
			recentSkew = median(goodSamples)
		}
		severity = classifySkew(math.Abs(recentSkew))
	}

	// For no_clock / bimodal_clock nodes, skip drift when data is unreliable.
	var drift float64
	if severity != SkewNoClock && severity != SkewBimodalClock && len(tsSkews) >= minDriftSamples {
		drift = computeDrift(tsSkews)
		// Cap physically impossible drift rates.
		if math.Abs(drift) > maxReasonableDriftPerDay {
			drift = 0
		}
	}

	// Build sparkline samples from tsSkews (already sorted by time above).
	samples := make([]SkewSample, len(tsSkews))
	for i, p := range tsSkews {
		samples[i] = SkewSample{Timestamp: p.ts, SkewSec: round(p.skew, 1)}
	}

	return &NodeClockSkew{
		Pubkey:               pubkey,
		MeanSkewSec:          round(meanSkew, 1),
		MedianSkewSec:        round(medSkew, 1),
		LastSkewSec:          round(lastSkew, 1),
		RecentMedianSkewSec:  round(recentSkew, 1),
		DriftPerDaySec:       round(drift, 2),
		Severity:             severity,
		SampleCount:          totalSamples,
		Calibrated:           anyCal,
		LastAdvertTS:         lastAdvTS,
		LastObservedTS:       lastObsTS,
		Samples:              samples,
		GoodFraction:         round(goodFraction, 2),
		RecentBadSampleCount: recentBadCount,
		RecentSampleCount:    recentSampleCount,
	}
}
|
||||
|
||||
// GetFleetClockSkew returns clock skew data for all nodes that have skew data.
|
||||
// Must NOT be called with s.mu held.
|
||||
func (s *PacketStore) GetFleetClockSkew() []*NodeClockSkew {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Build name/role lookup from DB cache (requires s.mu held).
|
||||
allNodes, _ := s.getCachedNodesAndPM()
|
||||
nameMap := make(map[string]nodeInfo, len(allNodes))
|
||||
for _, ni := range allNodes {
|
||||
nameMap[ni.PublicKey] = ni
|
||||
}
|
||||
|
||||
var results []*NodeClockSkew
|
||||
for pubkey := range s.byNode {
|
||||
cs := s.getNodeClockSkewLocked(pubkey)
|
||||
if cs == nil {
|
||||
continue
|
||||
}
|
||||
// Enrich with node name/role.
|
||||
if ni, ok := nameMap[pubkey]; ok {
|
||||
cs.NodeName = ni.Name
|
||||
cs.NodeRole = ni.Role
|
||||
}
|
||||
// Omit samples in fleet response (too much data).
|
||||
cs.Samples = nil
|
||||
results = append(results, cs)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// GetObserverCalibrations returns the current observer clock offsets.
|
||||
func (s *PacketStore) GetObserverCalibrations() []ObserverCalibration {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
s.clockSkew.Recompute(s)
|
||||
|
||||
s.clockSkew.mu.RLock()
|
||||
defer s.clockSkew.mu.RUnlock()
|
||||
|
||||
result := make([]ObserverCalibration, 0, len(s.clockSkew.observerOffsets))
|
||||
for obsID, offset := range s.clockSkew.observerOffsets {
|
||||
result = append(result, ObserverCalibration{
|
||||
ObserverID: obsID,
|
||||
OffsetSec: round(offset, 1),
|
||||
Samples: s.clockSkew.observerSamples[obsID],
|
||||
})
|
||||
}
|
||||
// Sort by absolute offset descending.
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return math.Abs(result[i].OffsetSec) > math.Abs(result[j].OffsetSec)
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// ── Math Helpers ───────────────────────────────────────────────────────────────
|
||||
|
||||
// median returns the middle value of vals (the mean of the two middle
// values for even-length input), or 0 for an empty slice. The input slice
// is never modified; sorting happens on a private copy.
func median(vals []float64) float64 {
	n := len(vals)
	if n == 0 {
		return 0
	}
	tmp := append([]float64(nil), vals...)
	sort.Float64s(tmp)
	mid := n / 2
	if n%2 == 1 {
		return tmp[mid]
	}
	return (tmp[mid-1] + tmp[mid]) / 2
}
|
||||
|
||||
// mean returns the arithmetic average of vals, or 0 for an empty slice.
func mean(vals []float64) float64 {
	if len(vals) == 0 {
		return 0
	}
	var total float64
	for _, v := range vals {
		total += v
	}
	return total / float64(len(vals))
}
|
||||
|
||||
// tsSkewPair is a (timestamp, skew) pair for drift estimation.
type tsSkewPair struct {
	// ts is the Unix epoch of the observation.
	ts int64
	// skew is the corrected clock skew in seconds at that time.
	skew float64
}
|
||||
|
||||
// computeDrift estimates linear drift in seconds per day from time-ordered
// (timestamp, skew) pairs. Issue #789: a single GPS-correction event (huge
// skew jump in seconds) used to dominate ordinary least squares and produce
// absurd drift like 1.7M sec/day. We now:
//
//  1. Drop pairs whose consecutive skew jump exceeds maxPlausibleSkewJumpSec
//     (clock corrections, not physical drift). This protects both OLS-style
//     consumers and Theil-Sen.
//  2. Use Theil-Sen regression — the slope is the median of all pairwise
//     slopes, naturally robust to remaining outliers (breakdown point ~29%).
//
// For very small samples after filtering we fall back to a simple slope
// between first and last calibrated samples.
//
// NOTE(review): this sorts `pairs` in place, so the caller's slice is
// reordered as a side effect (current callers sort it beforehand anyway).
// Returns 0 when fewer than 2 pairs or less than 1h of data is available.
func computeDrift(pairs []tsSkewPair) float64 {
	if len(pairs) < 2 {
		return 0
	}
	// Sort by timestamp.
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[i].ts < pairs[j].ts
	})

	// Time span too short? Skip.
	spanSec := float64(pairs[len(pairs)-1].ts - pairs[0].ts)
	if spanSec < 3600 { // need at least 1 hour of data
		return 0
	}

	// Outlier filter: drop samples where the skew jumps more than
	// maxPlausibleSkewJumpSec from the running "stable" baseline.
	// We anchor on the first sample, then accept each subsequent point
	// that's within the threshold of the most recent accepted point —
	// this preserves a slow drift while rejecting correction events.
	filtered := make([]tsSkewPair, 0, len(pairs))
	filtered = append(filtered, pairs[0])
	for i := 1; i < len(pairs); i++ {
		prev := filtered[len(filtered)-1]
		if math.Abs(pairs[i].skew-prev.skew) <= maxPlausibleSkewJumpSec {
			filtered = append(filtered, pairs[i])
		}
	}
	// If the filter killed too much (e.g. unstable node), fall back to the
	// raw series so we at least produce *something* — it'll be capped by
	// maxReasonableDriftPerDay downstream.
	if len(filtered) < 2 || float64(filtered[len(filtered)-1].ts-filtered[0].ts) < 3600 {
		filtered = pairs
	}

	// Cap point count for Theil-Sen (O(n²) on pairs). Keep most-recent.
	if len(filtered) > theilSenMaxPoints {
		filtered = filtered[len(filtered)-theilSenMaxPoints:]
	}

	return theilSenSlope(filtered) * 86400 // sec/sec → sec/day
}
|
||||
|
||||
// theilSenSlope returns the Theil-Sen estimator: median of all pairwise
|
||||
// slopes (yj - yi) / (tj - ti) for i < j. Naturally robust to outliers.
|
||||
// Pairs must be sorted by timestamp ascending.
|
||||
func theilSenSlope(pairs []tsSkewPair) float64 {
|
||||
n := len(pairs)
|
||||
if n < 2 {
|
||||
return 0
|
||||
}
|
||||
// Pre-allocate: n*(n-1)/2 pairs.
|
||||
slopes := make([]float64, 0, n*(n-1)/2)
|
||||
for i := 0; i < n; i++ {
|
||||
for j := i + 1; j < n; j++ {
|
||||
dt := float64(pairs[j].ts - pairs[i].ts)
|
||||
if dt <= 0 {
|
||||
continue
|
||||
}
|
||||
slopes = append(slopes, (pairs[j].skew-pairs[i].skew)/dt)
|
||||
}
|
||||
}
|
||||
if len(slopes) == 0 {
|
||||
return 0
|
||||
}
|
||||
return median(slopes)
|
||||
}
|
||||
@@ -1,956 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ── classifySkew ───────────────────────────────────────────────────────────────
|
||||
|
||||
// TestClassifySkew pins the severity thresholds at their exact boundaries
// (5 min, 1 h, 30 d, 365 d), checking both sides of each cutoff.
func TestClassifySkew(t *testing.T) {
	tests := []struct {
		absSkew  float64
		expected SkewSeverity
	}{
		{0, SkewOK},
		{60, SkewOK},                    // 1 min
		{299, SkewOK},                   // just under 5 min
		{300, SkewWarning},              // exactly 5 min
		{1800, SkewWarning},             // 30 min
		{3599, SkewWarning},             // just under 1 hour
		{3600, SkewCritical},            // exactly 1 hour
		{86400, SkewCritical},           // 1 day
		{2592000 - 1, SkewCritical},     // just under 30 days
		{2592000, SkewAbsurd},           // exactly 30 days
		{86400 * 365 - 1, SkewAbsurd},   // just under 365 days
		{86400 * 365, SkewNoClock},      // exactly 365 days
		{86400 * 365 * 10, SkewNoClock}, // 10 years (epoch-0 style)
	}
	for _, tc := range tests {
		got := classifySkew(tc.absSkew)
		if got != tc.expected {
			t.Errorf("classifySkew(%v) = %v, want %v", tc.absSkew, got, tc.expected)
		}
	}
}
|
||||
|
||||
// ── median ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
// TestMedian covers empty/nil input, odd and even counts, unsorted input,
// and negative values.
func TestMedian(t *testing.T) {
	tests := []struct {
		vals     []float64
		expected float64
	}{
		{nil, 0},
		{[]float64{}, 0},
		{[]float64{5}, 5},
		{[]float64{1, 3}, 2},
		{[]float64{3, 1, 2}, 2},
		{[]float64{4, 1, 3, 2}, 2.5},
		{[]float64{-10, 0, 10}, 0},
	}
	for _, tc := range tests {
		got := median(tc.vals)
		if got != tc.expected {
			t.Errorf("median(%v) = %v, want %v", tc.vals, got, tc.expected)
		}
	}
}
|
||||
|
||||
// TestMean covers nil input, a single element, and a multi-element average.
func TestMean(t *testing.T) {
	tests := []struct {
		vals     []float64
		expected float64
	}{
		{nil, 0},
		{[]float64{10}, 10},
		{[]float64{2, 4, 6}, 4},
	}
	for _, tc := range tests {
		got := mean(tc.vals)
		if got != tc.expected {
			t.Errorf("mean(%v) = %v, want %v", tc.vals, got, tc.expected)
		}
	}
}
|
||||
|
||||
// ── parseISO ───────────────────────────────────────────────────────────────────
|
||||
|
||||
// TestParseISO checks empty/garbage inputs return 0 and that both the "Z"
// and explicit "+00:00" RFC 3339 offset spellings parse to the same epoch.
func TestParseISO(t *testing.T) {
	tests := []struct {
		input    string
		expected int64
	}{
		{"", 0},
		{"garbage", 0},
		{"2026-04-15T12:00:00Z", 1776254400},
		{"2026-04-15T12:00:00+00:00", 1776254400},
	}
	for _, tc := range tests {
		got := parseISO(tc.input)
		if got != tc.expected {
			t.Errorf("parseISO(%q) = %v, want %v", tc.input, got, tc.expected)
		}
	}
}
|
||||
|
||||
// ── extractTimestamp ────────────────────────────────────────────────────────────
|
||||
|
||||
// TestExtractTimestamp checks the lookup order: nested payload.timestamp is
// preferred, top-level timestamp is the fallback, and 0 means not found.
func TestExtractTimestamp(t *testing.T) {
	// Nested payload.timestamp
	decoded := map[string]interface{}{
		"payload": map[string]interface{}{
			"timestamp": float64(1776340800),
		},
	}
	got := extractTimestamp(decoded)
	if got != 1776340800 {
		t.Errorf("extractTimestamp (nested) = %v, want 1776340800", got)
	}

	// Top-level timestamp
	decoded2 := map[string]interface{}{
		"timestamp": float64(1776340900),
	}
	got2 := extractTimestamp(decoded2)
	if got2 != 1776340900 {
		t.Errorf("extractTimestamp (top-level) = %v, want 1776340900", got2)
	}

	// No timestamp
	decoded3 := map[string]interface{}{"foo": "bar"}
	got3 := extractTimestamp(decoded3)
	if got3 != 0 {
		t.Errorf("extractTimestamp (missing) = %v, want 0", got3)
	}
}
|
||||
|
||||
// ── calibrateObservers ─────────────────────────────────────────────────────────
|
||||
|
||||
// TestCalibrateObservers_SingleObserver verifies that packets heard by only
// one observer produce no calibration offsets (no relative information).
func TestCalibrateObservers_SingleObserver(t *testing.T) {
	// Single-observer packets can't calibrate — should return empty.
	samples := []skewSample{
		{advertTS: 1000, observedTS: 1000, observerID: "obs1", hash: "h1"},
		{advertTS: 2000, observedTS: 2000, observerID: "obs1", hash: "h2"},
	}
	offsets, _ := calibrateObservers(samples)
	if len(offsets) != 0 {
		t.Errorf("expected no offsets for single-observer, got %v", offsets)
	}
}
|
||||
|
||||
// TestCalibrateObservers_MultiObserver verifies per-observer offsets when
// three observers hear the same packets: the one consistently +10s from the
// per-packet median gets offset 10, the others 0.
func TestCalibrateObservers_MultiObserver(t *testing.T) {
	// Packet h1 seen by 3 observers: obs1 at t=100, obs2 at t=110, obs3 at t=100.
	// Median observation = 100. obs1=0, obs2=+10, obs3=0
	// Packet h2 seen by 3 observers: obs1 at t=200, obs2 at t=210, obs3 at t=200.
	// Median observation = 200. obs1=0, obs2=+10, obs3=0
	samples := []skewSample{
		{advertTS: 100, observedTS: 100, observerID: "obs1", hash: "h1"},
		{advertTS: 100, observedTS: 110, observerID: "obs2", hash: "h1"},
		{advertTS: 100, observedTS: 100, observerID: "obs3", hash: "h1"},
		{advertTS: 200, observedTS: 200, observerID: "obs1", hash: "h2"},
		{advertTS: 200, observedTS: 210, observerID: "obs2", hash: "h2"},
		{advertTS: 200, observedTS: 200, observerID: "obs3", hash: "h2"},
	}
	offsets, _ := calibrateObservers(samples)
	if offsets["obs1"] != 0 {
		t.Errorf("obs1 offset = %v, want 0", offsets["obs1"])
	}
	if offsets["obs2"] != 10 {
		t.Errorf("obs2 offset = %v, want 10", offsets["obs2"])
	}
	if offsets["obs3"] != 0 {
		t.Errorf("obs3 offset = %v, want 0", offsets["obs3"])
	}
}
|
||||
|
||||
// ── computeNodeSkew ────────────────────────────────────────────────────────────
|
||||
|
||||
// TestComputeNodeSkew_BasicCorrection validates the sign of the observer
// offset correction with two observers: both corrected skews converge to
// the same value even though their raw skews differ.
func TestComputeNodeSkew_BasicCorrection(t *testing.T) {
	// Validates observer offset correction direction.
	//
	// Setup: node is 60s ahead, obs1 accurate, obs2 is 10s ahead.
	// With 2 observers, median obs_ts = 1005.
	//   obs1 offset = 1000 - 1005 = -5
	//   obs2 offset = 1010 - 1005 = +5
	// Correction: corrected = raw_skew + obsOffset
	//   obs1: raw=60, corrected = 60 + (-5) = 55
	//   obs2: raw=50, corrected = 50 + 5 = 55
	// Both converge to 55 (not exact 60 because with only 2 observers,
	// the median can't fully distinguish which observer is drifted).

	samples := []skewSample{
		// Same packet seen by accurate obs1 and obs2 (+10s ahead)
		{advertTS: 1060, observedTS: 1000, observerID: "obs1", hash: "h1"},
		{advertTS: 1060, observedTS: 1010, observerID: "obs2", hash: "h1"},
	}
	offsets, _ := calibrateObservers(samples)
	// median obs = 1005, obs1 offset = -5, obs2 offset = +5
	// So the median approach finds obs2 is +5 ahead (relative to median)

	// Now compute node skew with those offsets:
	nodeSkew := computeNodeSkew(samples, offsets)
	cs, ok := nodeSkew["h1"]
	if !ok {
		t.Fatal("expected skew data for hash h1")
	}
	// With only 2 observers, median obs_ts = 1005.
	// obs1 offset = 1000-1005 = -5, obs2 offset = 1010-1005 = +5
	// raw from obs1 = 60, corrected = 60 + (-5) = 55
	// raw from obs2 = 50, corrected = 50 + 5 = 55
	// median = 55
	if cs.MedianSkewSec != 55 {
		t.Errorf("median skew = %v, want 55", cs.MedianSkewSec)
	}
}
|
||||
|
||||
// TestComputeNodeSkew_ThreeObservers shows that with three observers the
// majority pins the median, so the drifted observer is fully corrected and
// every corrected skew converges to the true 60s.
func TestComputeNodeSkew_ThreeObservers(t *testing.T) {
	// Node is exactly 60s ahead. obs1 accurate, obs2 accurate, obs3 +30s ahead.
	// advertTS = 1060, real time = 1000
	samples := []skewSample{
		{advertTS: 1060, observedTS: 1000, observerID: "obs1", hash: "h1"},
		{advertTS: 1060, observedTS: 1000, observerID: "obs2", hash: "h1"},
		{advertTS: 1060, observedTS: 1030, observerID: "obs3", hash: "h1"},
	}
	offsets, _ := calibrateObservers(samples)
	// median obs_ts = 1000. obs1=0, obs2=0, obs3=+30
	if offsets["obs3"] != 30 {
		t.Errorf("obs3 offset = %v, want 30", offsets["obs3"])
	}

	nodeSkew := computeNodeSkew(samples, offsets)
	cs := nodeSkew["h1"]
	if cs == nil {
		t.Fatal("expected skew data for h1")
	}
	// raw from obs1 = 60, corrected = 60 + 0 = 60
	// raw from obs2 = 60, corrected = 60 + 0 = 60
	// raw from obs3 = 30, corrected = 30 + 30 = 60
	// All three converge to 60.
	if cs.MedianSkewSec != 60 {
		t.Errorf("median skew = %v, want 60 (node is 60s ahead)", cs.MedianSkewSec)
	}
}
|
||||
|
||||
// ── computeDrift ───────────────────────────────────────────────────────────────
|
||||
|
||||
// TestComputeDrift_Stable: a constant skew over 4 hours must yield 0 drift.
func TestComputeDrift_Stable(t *testing.T) {
	// Constant skew = no drift.
	pairs := []tsSkewPair{
		{ts: 0, skew: 60},
		{ts: 7200, skew: 60},
		{ts: 14400, skew: 60},
	}
	drift := computeDrift(pairs)
	if drift != 0 {
		t.Errorf("drift = %v, want 0 for stable skew", drift)
	}
}
|
||||
|
||||
// TestComputeDrift_LinearDrift: a perfectly linear 1 s/hour skew increase
// must estimate ~24 sec/day.
func TestComputeDrift_LinearDrift(t *testing.T) {
	// 1 second drift per hour = 24 sec/day.
	pairs := []tsSkewPair{
		{ts: 0, skew: 0},
		{ts: 3600, skew: 1},
		{ts: 7200, skew: 2},
	}
	drift := computeDrift(pairs)
	expected := 24.0
	if math.Abs(drift-expected) > 0.1 {
		t.Errorf("drift = %v, want ~%v", drift, expected)
	}
}
|
||||
|
||||
// TestComputeDrift_TooFewSamples: a single sample cannot define a slope.
func TestComputeDrift_TooFewSamples(t *testing.T) {
	pairs := []tsSkewPair{{ts: 0, skew: 10}}
	if computeDrift(pairs) != 0 {
		t.Error("expected 0 drift for single sample")
	}
}
|
||||
|
||||
// TestComputeDrift_TooShortSpan: spans under the 1-hour minimum are rejected.
func TestComputeDrift_TooShortSpan(t *testing.T) {
	// Less than 1 hour apart.
	pairs := []tsSkewPair{
		{ts: 0, skew: 0},
		{ts: 1800, skew: 10},
	}
	if computeDrift(pairs) != 0 {
		t.Error("expected 0 drift for short time span")
	}
}
|
||||
|
||||
// ── jsonNumber ─────────────────────────────────────────────────────────────────
|
||||
|
||||
func TestJsonNumber(t *testing.T) {
|
||||
m := map[string]interface{}{
|
||||
"a": float64(42),
|
||||
"b": int64(99),
|
||||
"c": "not a number",
|
||||
"d": nil,
|
||||
}
|
||||
if jsonNumber(m, "a") != 42 {
|
||||
t.Error("float64 case failed")
|
||||
}
|
||||
if jsonNumber(m, "b") != 99 {
|
||||
t.Error("int64 case failed")
|
||||
}
|
||||
if jsonNumber(m, "c") != 0 {
|
||||
t.Error("string case should return 0")
|
||||
}
|
||||
if jsonNumber(m, "d") != 0 {
|
||||
t.Error("nil case should return 0")
|
||||
}
|
||||
if jsonNumber(m, "missing") != 0 {
|
||||
t.Error("missing key should return 0")
|
||||
}
|
||||
}
|
||||
|
||||
// ── Integration: GetNodeClockSkew via PacketStore ──────────────────────────────
|
||||
|
||||
func TestGetNodeClockSkew_Integration(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
|
||||
// Simulate two ADVERT transmissions for the same node, seen by 2 observers each.
|
||||
// Node "AABB" has clock 120s ahead.
|
||||
pt := 4 // ADVERT
|
||||
tx1 := &StoreTx{
|
||||
Hash: "hash1",
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":1700002320}}`, // obs=1700002200, node ahead by 120s
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: "2023-11-14T22:50:00Z"}, // 1700002200
|
||||
{ObserverID: "obs2", Timestamp: "2023-11-14T22:50:00Z"}, // 1700002200
|
||||
},
|
||||
}
|
||||
tx2 := &StoreTx{
|
||||
Hash: "hash2",
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":1700005920}}`, // obs=1700005800, node ahead by 120s
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: "2023-11-14T23:50:00Z"}, // 1700005800
|
||||
{ObserverID: "obs2", Timestamp: "2023-11-14T23:50:00Z"}, // 1700005800
|
||||
},
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.byNode["AABB"] = []*StoreTx{tx1, tx2}
|
||||
ps.byPayloadType[4] = []*StoreTx{tx1, tx2}
|
||||
// Force recompute by setting interval to 0.
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
result := ps.GetNodeClockSkew("AABB")
|
||||
if result == nil {
|
||||
t.Fatal("expected clock skew result for node AABB")
|
||||
}
|
||||
if result.Pubkey != "AABB" {
|
||||
t.Errorf("pubkey = %q, want AABB", result.Pubkey)
|
||||
}
|
||||
// Both transmissions show 120s skew, so median should be 120.
|
||||
if result.MedianSkewSec != 120 {
|
||||
t.Errorf("median skew = %v, want 120", result.MedianSkewSec)
|
||||
}
|
||||
if result.SampleCount < 2 {
|
||||
t.Errorf("sample count = %v, want >= 2", result.SampleCount)
|
||||
}
|
||||
if result.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok (120s < 5min)", result.Severity)
|
||||
}
|
||||
// Drift should be ~0 since skew is constant.
|
||||
if math.Abs(result.DriftPerDaySec) > 1 {
|
||||
t.Errorf("drift = %v, want ~0 for constant skew", result.DriftPerDaySec)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeClockSkew_NoData(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
result := ps.GetNodeClockSkew("nonexistent")
|
||||
if result != nil {
|
||||
t.Error("expected nil for nonexistent node")
|
||||
}
|
||||
}
|
||||
|
||||
// ── Sanity check tests (#XXX — clock skew crazy stats) ────────────────────────
|
||||
|
||||
func TestGetNodeClockSkew_NoClock_EpochZero(t *testing.T) {
|
||||
// Node with epoch-0 timestamp produces huge skew → no_clock severity, drift=0.
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4 // ADVERT
|
||||
|
||||
// Epoch-ish advert: advertTS near start of 2020, observed in 2023 → |skew| > 365 days
|
||||
var txs []*StoreTx
|
||||
baseObs := int64(1700000000) // ~Nov 2023
|
||||
for i := 0; i < 6; i++ {
|
||||
obsTS := baseObs + int64(i)*7200
|
||||
tx := &StoreTx{
|
||||
Hash: "epoch-h" + string(rune('0'+i)),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":1577836800}}`, // Jan 1 2020 — valid but way off
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.byNode["EPOCH"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
result := ps.GetNodeClockSkew("EPOCH")
|
||||
if result == nil {
|
||||
t.Fatal("expected clock skew result for epoch-0 node")
|
||||
}
|
||||
if result.Severity != SkewNoClock {
|
||||
t.Errorf("severity = %v, want no_clock", result.Severity)
|
||||
}
|
||||
if result.DriftPerDaySec != 0 {
|
||||
t.Errorf("drift = %v, want 0 for no_clock node", result.DriftPerDaySec)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeClockSkew_TooFewSamplesForDrift(t *testing.T) {
|
||||
// Node with only 2 advert samples → drift should not be computed.
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 2; i++ {
|
||||
obsTS := baseObs + int64(i)*7200
|
||||
advTS := obsTS + 120 // 120s ahead
|
||||
tx := &StoreTx{
|
||||
Hash: "few-h" + string(rune('0'+i)),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.byNode["FEWSAMP"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
result := ps.GetNodeClockSkew("FEWSAMP")
|
||||
if result == nil {
|
||||
t.Fatal("expected clock skew result")
|
||||
}
|
||||
if result.DriftPerDaySec != 0 {
|
||||
t.Errorf("drift = %v, want 0 for 2-sample node (minimum is %d)", result.DriftPerDaySec, minDriftSamples)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeClockSkew_AbsurdDriftCapped(t *testing.T) {
|
||||
// Node with wildly varying skew producing |drift| > 86400 s/day → drift capped to 0.
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
// Create 6 samples with extreme skew variation to produce absurd drift.
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 6; i++ {
|
||||
obsTS := baseObs + int64(i)*3600
|
||||
// Alternate between huge positive and negative skew offsets
|
||||
skewOffset := int64(50000 * (1 - 2*(i%2))) // +50000 or -50000
|
||||
advTS := obsTS + skewOffset
|
||||
tx := &StoreTx{
|
||||
Hash: "wild-h" + string(rune('0'+i)),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.byNode["WILD"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
result := ps.GetNodeClockSkew("WILD")
|
||||
if result == nil {
|
||||
t.Fatal("expected clock skew result")
|
||||
}
|
||||
if math.Abs(result.DriftPerDaySec) > maxReasonableDriftPerDay {
|
||||
t.Errorf("drift = %v, should be capped (|drift| > %v)", result.DriftPerDaySec, maxReasonableDriftPerDay)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeClockSkew_NormalNodeWithDrift(t *testing.T) {
|
||||
// Normal node with 6 samples and consistent linear drift → drift computed correctly.
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 6; i++ {
|
||||
obsTS := baseObs + int64(i)*7200 // every 2 hours
|
||||
// Drift: 1 sec/hour = 24 sec/day
|
||||
advTS := obsTS + 120 + int64(i) // skew grows by 1s per sample (2h apart)
|
||||
tx := &StoreTx{
|
||||
Hash: "norm-h" + string(rune('0'+i)),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.byNode["NORMAL"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
result := ps.GetNodeClockSkew("NORMAL")
|
||||
if result == nil {
|
||||
t.Fatal("expected clock skew result")
|
||||
}
|
||||
if result.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok", result.Severity)
|
||||
}
|
||||
// 1s per 7200s = 12 s/day
|
||||
if result.DriftPerDaySec == 0 {
|
||||
t.Error("expected non-zero drift for linearly drifting node")
|
||||
}
|
||||
if math.Abs(result.DriftPerDaySec) > maxReasonableDriftPerDay {
|
||||
t.Errorf("drift = %v, should be reasonable", result.DriftPerDaySec)
|
||||
}
|
||||
}
|
||||
|
||||
// formatInt64 is a test helper to format int64 as string for JSON embedding.
|
||||
func formatInt64(n int64) string {
|
||||
return fmt.Sprintf("%d", n)
|
||||
}
|
||||
|
||||
// ── #789: Recent-window severity & robust drift ───────────────────────────────
|
||||
|
||||
// TestSeverityUsesRecentNotMedian: 100 historical bad samples (skew=-60s,
|
||||
// each ~5min apart) followed by 5 fresh good samples (skew=-1s). All-time
|
||||
// median is still huge-ish but recent-window severity must reflect the
|
||||
// current healthy state.
|
||||
func TestSeverityUsesRecentNotMedian(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 105; i++ {
|
||||
obsTS := baseObs + int64(i)*300 // 5 min apart
|
||||
var skew int64 = -60
|
||||
if i >= 100 {
|
||||
skew = -1 // good samples at the tail
|
||||
}
|
||||
advTS := obsTS + skew
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("recent-h%03d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["RECENT"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("RECENT")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok (recent samples are healthy)", r.Severity)
|
||||
}
|
||||
if math.Abs(r.RecentMedianSkewSec) > 5 {
|
||||
t.Errorf("recentMedianSkewSec = %v, want ~-1", r.RecentMedianSkewSec)
|
||||
}
|
||||
// Historical median should still be retained for context.
|
||||
if math.Abs(r.MedianSkewSec) < 30 {
|
||||
t.Errorf("medianSkewSec = %v, expected historical median to remain large", r.MedianSkewSec)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDriftRejectsCorrectionJump: 30 minutes of clean linear drift, then a
|
||||
// single 60-second skew jump. The pre-jump slope should win — drift must
|
||||
// not be catastrophically inflated by the correction event.
|
||||
func TestDriftRejectsCorrectionJump(t *testing.T) {
|
||||
pairs := []tsSkewPair{}
|
||||
// 30 min of stable, ~12 sec/day drift: 1s per 7200s.
|
||||
for i := 0; i < 12; i++ {
|
||||
ts := int64(i) * 300
|
||||
skew := float64(i) * (1.0 / 24.0) // ~0.04s per 5min step → 12 s/day
|
||||
pairs = append(pairs, tsSkewPair{ts: ts, skew: skew})
|
||||
}
|
||||
// Wait an hour, then a single 1000-sec correction jump (clearly outlier).
|
||||
pairs = append(pairs, tsSkewPair{ts: 3600 + 12*300, skew: 1000})
|
||||
|
||||
drift := computeDrift(pairs)
|
||||
// Without rejection this would be ~ (1000-0)/(end-0) * 86400 = enormous.
|
||||
if math.Abs(drift) > 100 {
|
||||
t.Errorf("drift = %v, expected small (~12 s/day), correction jump should be filtered", drift)
|
||||
}
|
||||
}
|
||||
|
||||
// TestTheilSenMatchesOLSWhenClean: on clean linear data Theil-Sen should
|
||||
// produce essentially the OLS answer.
|
||||
func TestTheilSenMatchesOLSWhenClean(t *testing.T) {
|
||||
// 1 sec drift per hour = 24 sec/day, 20 evenly-spaced samples.
|
||||
pairs := []tsSkewPair{}
|
||||
for i := 0; i < 20; i++ {
|
||||
pairs = append(pairs, tsSkewPair{
|
||||
ts: int64(i) * 600,
|
||||
skew: float64(i) * (600.0 / 3600.0),
|
||||
})
|
||||
}
|
||||
drift := computeDrift(pairs)
|
||||
if math.Abs(drift-24.0) > 0.25 { // ~1%
|
||||
t.Errorf("drift = %v, want ~24", drift)
|
||||
}
|
||||
}
|
||||
|
||||
// TestReporterScenario_789: reproduce the exact scenario from issue #789.
|
||||
// Reporter saw mean=-52565156, median=-59063561, last=-0.8, sample count
|
||||
// 1662, drift +1793549.9 s/day, severity=absurd. After the fix, severity
|
||||
// must be ok (recent samples are healthy) and drift must be sane.
|
||||
func TestReporterScenario_789(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
// 1657 samples with the bad ~-683-day skew (the historical poison),
|
||||
// then 5 freshly corrected samples at -0.8s — totals 1662.
|
||||
for i := 0; i < 1662; i++ {
|
||||
obsTS := baseObs + int64(i)*60 // 1 min apart
|
||||
var skew int64
|
||||
if i < 1657 {
|
||||
skew = -59063561 // ~ -683 days
|
||||
} else {
|
||||
skew = -1 // corrected (rounded; reporter saw -0.8)
|
||||
}
|
||||
advTS := obsTS + skew
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("rep-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["REPNODE"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("REPNODE")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
// Severity must reflect current health, not the all-time median.
|
||||
if r.Severity != SkewOK && r.Severity != SkewWarning {
|
||||
t.Errorf("severity = %v, want ok/warning (recent samples are healthy)", r.Severity)
|
||||
}
|
||||
if math.Abs(r.RecentMedianSkewSec) > 5 {
|
||||
t.Errorf("recentMedianSkewSec = %v, want near 0", r.RecentMedianSkewSec)
|
||||
}
|
||||
// Drift must not be absurd. The historical jump is one event between
|
||||
// the 1657th and 1658th sample; outlier rejection must contain it.
|
||||
if math.Abs(r.DriftPerDaySec) > maxReasonableDriftPerDay {
|
||||
t.Errorf("drift = %v, must be <= cap %v", r.DriftPerDaySec, maxReasonableDriftPerDay)
|
||||
}
|
||||
// And it should be close to zero (stable historical + stable corrected).
|
||||
if math.Abs(r.DriftPerDaySec) > 1000 {
|
||||
t.Errorf("drift = %v, expected near zero after outlier rejection", r.DriftPerDaySec)
|
||||
}
|
||||
// Historical median is preserved as context.
|
||||
if math.Abs(r.MedianSkewSec) < 1e6 {
|
||||
t.Errorf("medianSkewSec = %v, expected historical poison preserved as context", r.MedianSkewSec)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBimodalClock_845: 60% good samples → bimodal_clock severity.
|
||||
func TestBimodalClock_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
// 6 good samples (-5s each), 4 bad samples (-50000000s each) = 60% good
|
||||
// Interleave so the recent window (last 5) captures both good and bad.
|
||||
skews := []int64{-5, -5, -50000000, -5, -50000000, -5, -50000000, -5, -50000000, -5}
|
||||
for i := 0; i < 10; i++ {
|
||||
obsTS := baseObs + int64(i)*60
|
||||
advTS := obsTS + skews[i]
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("bimodal-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["BIMODAL"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("BIMODAL")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewBimodalClock {
|
||||
t.Errorf("severity = %v, want bimodal_clock", r.Severity)
|
||||
}
|
||||
if math.Abs(r.RecentMedianSkewSec-(-5)) > 1 {
|
||||
t.Errorf("recentMedianSkewSec = %v, want ≈ -5 (median of good samples)", r.RecentMedianSkewSec)
|
||||
}
|
||||
if r.GoodFraction < 0.5 || r.GoodFraction > 0.7 {
|
||||
t.Errorf("goodFraction = %v, want ~0.6", r.GoodFraction)
|
||||
}
|
||||
if r.RecentBadSampleCount < 1 {
|
||||
t.Errorf("recentBadSampleCount = %v, want > 0", r.RecentBadSampleCount)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAllBad_NoClock_845: all samples bad → no_clock.
|
||||
func TestAllBad_NoClock_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 10; i++ {
|
||||
obsTS := baseObs + int64(i)*60
|
||||
advTS := obsTS - 50000000
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("allbad-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["ALLBAD"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("ALLBAD")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewNoClock {
|
||||
t.Errorf("severity = %v, want no_clock", r.Severity)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMostlyGood_OK_845: 90% good 10% bad → ok (outlier filtered).
|
||||
func TestMostlyGood_OK_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
// 9 good at -5s, 1 bad at -50000000s
|
||||
for i := 0; i < 10; i++ {
|
||||
obsTS := baseObs + int64(i)*60
|
||||
var skew int64
|
||||
if i < 9 {
|
||||
skew = -5
|
||||
} else {
|
||||
skew = -50000000
|
||||
}
|
||||
advTS := obsTS + skew
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("mostly-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["MOSTLY"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("MOSTLY")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
// 90% good → normal classification path, median of good samples = -5s → ok
|
||||
if r.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok", r.Severity)
|
||||
}
|
||||
if math.Abs(r.RecentMedianSkewSec-(-5)) > 1 {
|
||||
t.Errorf("recentMedianSkewSec = %v, want ≈ -5", r.RecentMedianSkewSec)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSingleSample_845: one good sample → ok.
|
||||
func TestSingleSample_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
obsTS := int64(1700000000)
|
||||
advTS := obsTS - 30 // 30s skew
|
||||
tx := &StoreTx{
|
||||
Hash: "single-0001",
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(advTS) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["SINGLE"] = []*StoreTx{tx}
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("SINGLE")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok", r.Severity)
|
||||
}
|
||||
if r.RecentSampleCount != 1 {
|
||||
t.Errorf("recentSampleCount = %d, want 1", r.RecentSampleCount)
|
||||
}
|
||||
if r.GoodFraction != 1.0 {
|
||||
t.Errorf("goodFraction = %v, want 1.0", r.GoodFraction)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFiftyFifty_Bimodal_845: 50% good / 50% bad → bimodal_clock.
|
||||
func TestFiftyFifty_Bimodal_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 10; i++ {
|
||||
obsTS := baseObs + int64(i)*60
|
||||
var skew int64
|
||||
if i%2 == 0 {
|
||||
skew = -10
|
||||
} else {
|
||||
skew = -50000000
|
||||
}
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("fifty-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(obsTS+skew) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["FIFTY"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("FIFTY")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewBimodalClock {
|
||||
t.Errorf("severity = %v, want bimodal_clock", r.Severity)
|
||||
}
|
||||
if r.GoodFraction < 0.4 || r.GoodFraction > 0.6 {
|
||||
t.Errorf("goodFraction = %v, want ~0.5", r.GoodFraction)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAllGood_OK_845: all samples good → ok, no bimodal.
|
||||
func TestAllGood_OK_845(t *testing.T) {
|
||||
ps := NewPacketStore(nil, nil)
|
||||
pt := 4
|
||||
baseObs := int64(1700000000)
|
||||
var txs []*StoreTx
|
||||
for i := 0; i < 10; i++ {
|
||||
obsTS := baseObs + int64(i)*60
|
||||
tx := &StoreTx{
|
||||
Hash: fmt.Sprintf("allgood-%04d", i),
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"payload":{"timestamp":` + formatInt64(obsTS-3) + `}}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "obs1", Timestamp: time.Unix(obsTS, 0).UTC().Format(time.RFC3339)},
|
||||
},
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
ps.mu.Lock()
|
||||
ps.byNode["ALLGOOD"] = txs
|
||||
for _, tx := range txs {
|
||||
ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
|
||||
}
|
||||
ps.clockSkew.computeInterval = 0
|
||||
ps.mu.Unlock()
|
||||
|
||||
r := ps.GetNodeClockSkew("ALLGOOD")
|
||||
if r == nil {
|
||||
t.Fatal("nil result")
|
||||
}
|
||||
if r.Severity != SkewOK {
|
||||
t.Errorf("severity = %v, want ok", r.Severity)
|
||||
}
|
||||
if r.GoodFraction != 1.0 {
|
||||
t.Errorf("goodFraction = %v, want 1.0", r.GoodFraction)
|
||||
}
|
||||
if r.RecentBadSampleCount != 0 {
|
||||
t.Errorf("recentBadSampleCount = %v, want 0", r.RecentBadSampleCount)
|
||||
}
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestCollisionDetailsIncludeNodePairs verifies that collision details contain
|
||||
// the correct prefix and matching node pairs (#757).
|
||||
func TestCollisionDetailsIncludeNodePairs(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
// Insert two repeater nodes with the same 3-byte prefix "AABB11"
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role) VALUES ('aabb11ccdd001122', 'Node Alpha', 'repeater')`)
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role) VALUES ('aabb11eeff334455', 'Node Beta', 'repeater')`)
|
||||
|
||||
// Add advert transmissions with hash_size=3 path bytes (0x80 = bits 10 → size 3)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('0180aabb11ccdd', 'col_hash_01', ?, 1, 4, '{"pubKey":"aabb11ccdd001122","name":"Node Alpha","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -91, '["aabb11"]', ?)`, recentEpoch)
|
||||
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('0180aabb11eeff', 'col_hash_02', ?, 1, 4, '{"pubKey":"aabb11eeff334455","name":"Node Beta","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (2, 1, 9.0, -93, '["aabb11"]', ?)`, recentEpoch)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
result := store.GetAnalyticsHashCollisions("")
|
||||
bySize, ok := result["by_size"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected by_size map")
|
||||
}
|
||||
|
||||
size3, ok := bySize["3"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected by_size[3] map")
|
||||
}
|
||||
|
||||
collisions, ok := size3["collisions"].([]collisionEntry)
|
||||
if !ok {
|
||||
t.Fatalf("expected collisions as []collisionEntry, got %T", size3["collisions"])
|
||||
}
|
||||
|
||||
// Find our collision
|
||||
var found *collisionEntry
|
||||
for i := range collisions {
|
||||
if collisions[i].Prefix == "AABB11" {
|
||||
found = &collisions[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if found == nil {
|
||||
t.Fatal("expected collision with prefix AABB11")
|
||||
}
|
||||
if found.Appearances != 2 {
|
||||
t.Errorf("expected 2 appearances, got %d", found.Appearances)
|
||||
}
|
||||
if len(found.Nodes) != 2 {
|
||||
t.Fatalf("expected 2 nodes in collision, got %d", len(found.Nodes))
|
||||
}
|
||||
|
||||
// Verify node pairs
|
||||
pubkeys := map[string]bool{}
|
||||
names := map[string]bool{}
|
||||
for _, n := range found.Nodes {
|
||||
pubkeys[n.PublicKey] = true
|
||||
names[n.Name] = true
|
||||
}
|
||||
if !pubkeys["aabb11ccdd001122"] {
|
||||
t.Error("expected node aabb11ccdd001122 in collision")
|
||||
}
|
||||
if !pubkeys["aabb11eeff334455"] {
|
||||
t.Error("expected node aabb11eeff334455 in collision")
|
||||
}
|
||||
if !names["Node Alpha"] {
|
||||
t.Error("expected Node Alpha in collision")
|
||||
}
|
||||
if !names["Node Beta"] {
|
||||
t.Error("expected Node Beta in collision")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCollisionDetailsEmptyWhenNoCollisions verifies that collision details are
|
||||
// empty when there are no collisions (#757).
|
||||
func TestCollisionDetailsEmptyWhenNoCollisions(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
// Insert one repeater node with 3-byte hash
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role) VALUES ('aabb11ccdd001122', 'Solo Node', 'repeater')`)
|
||||
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('0180aabb11ccdd', 'solo_hash_01', ?, 1, 4, '{"pubKey":"aabb11ccdd001122","name":"Solo Node","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -91, '["aabb11"]', ?)`, recentEpoch)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
result := store.GetAnalyticsHashCollisions("")
|
||||
bySize, ok := result["by_size"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected by_size map")
|
||||
}
|
||||
|
||||
size3, ok := bySize["3"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected by_size[3] map")
|
||||
}
|
||||
|
||||
collisions, ok := size3["collisions"].([]collisionEntry)
|
||||
if !ok {
|
||||
t.Fatalf("expected collisions as []collisionEntry, got %T", size3["collisions"])
|
||||
}
|
||||
|
||||
if len(collisions) != 0 {
|
||||
t.Errorf("expected 0 collisions, got %d", len(collisions))
|
||||
}
|
||||
}
|
||||
+4
-70
@@ -6,7 +6,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/meshcore-analyzer/geofilter"
|
||||
)
|
||||
@@ -17,17 +16,6 @@ type Config struct {
|
||||
APIKey string `json:"apiKey"`
|
||||
DBPath string `json:"dbPath"`
|
||||
|
||||
// NodeBlacklist is a list of public keys to exclude from all API responses.
|
||||
// Blacklisted nodes are hidden from node lists, search, detail, map, and stats.
|
||||
// Use this to filter out trolls, nodes with offensive names, or nodes
|
||||
// reporting deliberately false data (e.g. wrong GPS position) that the
|
||||
// operator refuses to fix.
|
||||
NodeBlacklist []string `json:"nodeBlacklist"`
|
||||
|
||||
// blacklistSetCached is the lazily-built set version of NodeBlacklist.
|
||||
blacklistSetCached map[string]bool
|
||||
blacklistOnce sync.Once
|
||||
|
||||
Branding map[string]interface{} `json:"branding"`
|
||||
Theme map[string]interface{} `json:"theme"`
|
||||
ThemeDark map[string]interface{} `json:"themeDark"`
|
||||
@@ -62,8 +50,6 @@ type Config struct {
|
||||
|
||||
Retention *RetentionConfig `json:"retention,omitempty"`
|
||||
|
||||
DB *DBConfig `json:"db,omitempty"`
|
||||
|
||||
PacketStore *PacketStoreConfig `json:"packetStore,omitempty"`
|
||||
|
||||
GeoFilter *GeoFilterConfig `json:"geo_filter,omitempty"`
|
||||
@@ -117,32 +103,16 @@ type NeighborGraphConfig struct {
|
||||
// PacketStoreConfig controls in-memory packet store limits.
|
||||
type PacketStoreConfig struct {
|
||||
RetentionHours float64 `json:"retentionHours"` // max age of packets in hours (0 = unlimited)
|
||||
MaxMemoryMB int `json:"maxMemoryMB"` // hard memory ceiling in MB (0 = unlimited)
|
||||
MaxResolvedPubkeyIndexEntries int `json:"maxResolvedPubkeyIndexEntries"` // warning threshold for index size (0 = 5M default)
|
||||
MaxMemoryMB int `json:"maxMemoryMB"` // hard memory ceiling in MB (0 = unlimited)
|
||||
}
|
||||
|
||||
// GeoFilterConfig is an alias for the shared geofilter.Config type.
|
||||
type GeoFilterConfig = geofilter.Config
|
||||
|
||||
type RetentionConfig struct {
|
||||
NodeDays int `json:"nodeDays"`
|
||||
ObserverDays int `json:"observerDays"`
|
||||
PacketDays int `json:"packetDays"`
|
||||
MetricsDays int `json:"metricsDays"`
|
||||
}
|
||||
|
||||
// DBConfig controls SQLite vacuum and maintenance behavior (#919).
|
||||
type DBConfig struct {
|
||||
VacuumOnStartup bool `json:"vacuumOnStartup"` // one-time full VACUUM on startup if auto_vacuum is not INCREMENTAL
|
||||
IncrementalVacuumPages int `json:"incrementalVacuumPages"` // pages returned to OS per reaper cycle (default 1024)
|
||||
}
|
||||
|
||||
// IncrementalVacuumPages returns the configured pages per vacuum or 1024 default.
|
||||
func (c *Config) IncrementalVacuumPages() int {
|
||||
if c.DB != nil && c.DB.IncrementalVacuumPages > 0 {
|
||||
return c.DB.IncrementalVacuumPages
|
||||
}
|
||||
return 1024
|
||||
NodeDays int `json:"nodeDays"`
|
||||
PacketDays int `json:"packetDays"`
|
||||
MetricsDays int `json:"metricsDays"`
|
||||
}
|
||||
|
||||
// MetricsRetentionDays returns configured metrics retention or 30 days default.
|
||||
@@ -195,15 +165,6 @@ func (c *Config) NodeDaysOrDefault() int {
|
||||
return 7
|
||||
}
|
||||
|
||||
// ObserverDaysOrDefault returns the configured retention.observerDays or 14 if not set.
|
||||
// A value of -1 means observers are never removed.
|
||||
func (c *Config) ObserverDaysOrDefault() int {
|
||||
if c.Retention != nil && c.Retention.ObserverDays != 0 {
|
||||
return c.Retention.ObserverDays
|
||||
}
|
||||
return 14
|
||||
}
|
||||
|
||||
type HealthThresholds struct {
|
||||
InfraDegradedHours float64 `json:"infraDegradedHours"`
|
||||
InfraSilentHours float64 `json:"infraSilentHours"`
|
||||
@@ -377,30 +338,3 @@ func (c *Config) PropagationBufferMs() int {
|
||||
}
|
||||
return 5000
|
||||
}
|
||||
|
||||
// blacklistSet lazily builds and caches the nodeBlacklist as a set for O(1) lookups.
|
||||
// Uses sync.Once to eliminate the data race on first concurrent access.
|
||||
func (c *Config) blacklistSet() map[string]bool {
|
||||
c.blacklistOnce.Do(func() {
|
||||
if len(c.NodeBlacklist) == 0 {
|
||||
return
|
||||
}
|
||||
m := make(map[string]bool, len(c.NodeBlacklist))
|
||||
for _, pk := range c.NodeBlacklist {
|
||||
trimmed := strings.ToLower(strings.TrimSpace(pk))
|
||||
if trimmed != "" {
|
||||
m[trimmed] = true
|
||||
}
|
||||
}
|
||||
c.blacklistSetCached = m
|
||||
})
|
||||
return c.blacklistSetCached
|
||||
}
|
||||
|
||||
// IsBlacklisted returns true if the given public key is in the nodeBlacklist.
|
||||
func (c *Config) IsBlacklisted(pubkey string) bool {
|
||||
if c == nil || len(c.NodeBlacklist) == 0 {
|
||||
return false
|
||||
}
|
||||
return c.blacklistSet()[strings.ToLower(strings.TrimSpace(pubkey))]
|
||||
}
|
||||
|
||||
@@ -365,25 +365,3 @@ func TestPropagationBufferMs(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestObserverDaysOrDefault(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
want int
|
||||
}{
|
||||
{"nil retention", &Config{}, 14},
|
||||
{"zero observer days", &Config{Retention: &RetentionConfig{ObserverDays: 0}}, 14},
|
||||
{"positive value", &Config{Retention: &RetentionConfig{ObserverDays: 30}}, 30},
|
||||
{"keep forever", &Config{Retention: &RetentionConfig{ObserverDays: -1}}, -1},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.cfg.ObserverDaysOrDefault()
|
||||
if got != tt.want {
|
||||
t.Errorf("ObserverDaysOrDefault() = %d, want %d", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
+13
-340
@@ -41,13 +41,13 @@ func setupTestDBv2(t *testing.T) *DB {
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT, raw_hex TEXT NOT NULL,
|
||||
hash TEXT NOT NULL UNIQUE, first_seen TEXT NOT NULL,
|
||||
route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
|
||||
decoded_json TEXT, channel_hash TEXT DEFAULT NULL, created_at TEXT DEFAULT (datetime('now'))
|
||||
decoded_json TEXT, created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
CREATE TABLE observations (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
|
||||
observer_id TEXT, observer_name TEXT, direction TEXT,
|
||||
snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp INTEGER NOT NULL, raw_hex TEXT
|
||||
snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp INTEGER NOT NULL
|
||||
);
|
||||
`
|
||||
if _, err := conn.Exec(schema); err != nil {
|
||||
@@ -585,15 +585,12 @@ func TestHandlePacketsMultiNodeWithStore(t *testing.T) {
|
||||
func TestHandlePacketDetailNoStore(t *testing.T) {
|
||||
_, router := setupNoStoreServer(t)
|
||||
|
||||
// With no in-memory store, handlePacketDetail now falls back to the DB
|
||||
// (#827). The seeded transmissions are present in the DB, so by-hash and
|
||||
// by-ID lookups succeed; only truly absent IDs return 404.
|
||||
t.Run("by hash", func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/api/packets/abc123def4567890", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200 (DB fallback), got %d: %s", w.Code, w.Body.String())
|
||||
if w.Code != 404 {
|
||||
t.Fatalf("expected 404 (no store), got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
})
|
||||
|
||||
@@ -601,8 +598,8 @@ func TestHandlePacketDetailNoStore(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/api/packets/1", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200 (DB fallback), got %d: %s", w.Code, w.Body.String())
|
||||
if w.Code != 404 {
|
||||
t.Fatalf("expected 404 (no store), got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
})
|
||||
|
||||
@@ -763,9 +760,9 @@ func TestGetChannelsFromStore(t *testing.T) {
|
||||
|
||||
func TestPrefixMapResolve(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aabbccdd11223344", Name: "NodeA", HasGPS: true, Lat: 37.5, Lon: -122.0},
|
||||
{Role: "repeater", PublicKey: "aabbccdd55667788", Name: "NodeB", HasGPS: false},
|
||||
{Role: "repeater", PublicKey: "eeff0011aabbccdd", Name: "NodeC", HasGPS: true, Lat: 38.0, Lon: -121.0},
|
||||
{PublicKey: "aabbccdd11223344", Name: "NodeA", HasGPS: true, Lat: 37.5, Lon: -122.0},
|
||||
{PublicKey: "aabbccdd55667788", Name: "NodeB", HasGPS: false},
|
||||
{PublicKey: "eeff0011aabbccdd", Name: "NodeC", HasGPS: true, Lat: 38.0, Lon: -121.0},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
@@ -805,8 +802,8 @@ func TestPrefixMapResolve(t *testing.T) {
|
||||
|
||||
t.Run("multiple candidates no GPS", func(t *testing.T) {
|
||||
noGPSNodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aa11bb22", Name: "X", HasGPS: false},
|
||||
{Role: "repeater", PublicKey: "aa11cc33", Name: "Y", HasGPS: false},
|
||||
{PublicKey: "aa11bb22", Name: "X", HasGPS: false},
|
||||
{PublicKey: "aa11cc33", Name: "Y", HasGPS: false},
|
||||
}
|
||||
pm2 := buildPrefixMap(noGPSNodes)
|
||||
n := pm2.resolve("aa11")
|
||||
@@ -820,8 +817,8 @@ func TestPrefixMapResolve(t *testing.T) {
|
||||
func TestPrefixMapCap(t *testing.T) {
|
||||
// 16-char pubkey — longer than maxPrefixLen
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aabbccdd11223344", Name: "LongKey"},
|
||||
{Role: "repeater", PublicKey: "eeff0011", Name: "ShortKey"}, // exactly 8 chars
|
||||
{PublicKey: "aabbccdd11223344", Name: "LongKey"},
|
||||
{PublicKey: "eeff0011", Name: "ShortKey"}, // exactly 8 chars
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
@@ -2201,53 +2198,6 @@ func TestStoreGetAnalyticsHashSizes(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestHashSizesDistributionByRepeatersFiltersRole(t *testing.T) {
|
||||
db := setupRichTestDB(t)
|
||||
defer db.Close()
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
result := store.GetAnalyticsHashSizes("")
|
||||
|
||||
// distributionByRepeaters should only count repeater nodes.
|
||||
// Rich test DB: aabbccdd11223344 = repeater (hash size 2), eeff00112233aabb = companion (hash size 3).
|
||||
dbr, ok := result["distributionByRepeaters"].(map[string]int)
|
||||
if !ok {
|
||||
t.Fatal("expected distributionByRepeaters map")
|
||||
}
|
||||
// Only the repeater node should be counted.
|
||||
if dbr["3"] != 0 {
|
||||
t.Errorf("distributionByRepeaters[3] = %d, want 0 (companion should be excluded)", dbr["3"])
|
||||
}
|
||||
if dbr["2"] != 1 {
|
||||
t.Errorf("distributionByRepeaters[2] = %d, want 1 (repeater)", dbr["2"])
|
||||
}
|
||||
|
||||
// multiByteNodes should include role field for frontend filtering.
|
||||
mbn, ok := result["multiByteNodes"].([]map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected multiByteNodes slice")
|
||||
}
|
||||
for _, node := range mbn {
|
||||
if _, hasRole := node["role"]; !hasRole {
|
||||
t.Errorf("multiByteNodes entry missing 'role' field: %v", node)
|
||||
}
|
||||
}
|
||||
// Verify companion is included in multiByteNodes (it's multi-byte) with correct role.
|
||||
foundCompanion := false
|
||||
for _, node := range mbn {
|
||||
if node["pubkey"] == "eeff00112233aabb" {
|
||||
foundCompanion = true
|
||||
if node["role"] != "companion" {
|
||||
t.Errorf("companion node role = %v, want 'companion'", node["role"])
|
||||
}
|
||||
}
|
||||
}
|
||||
if !foundCompanion {
|
||||
t.Error("expected companion node in multiByteNodes (multi-byte adopters should include all roles)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreGetAnalyticsSubpaths(t *testing.T) {
|
||||
db := setupRichTestDB(t)
|
||||
defer db.Close()
|
||||
@@ -3220,189 +3170,6 @@ func TestGetNodeHashSizeInfoEdgeCases(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestHashSizeTransportRoutePathByteOffset verifies that transport routes (0, 3)
|
||||
// read the path byte from offset 5 (after 4 transport code bytes), not offset 1.
|
||||
// Regression test for #744 / #722.
|
||||
func TestHashSizeTransportRoutePathByteOffset(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
|
||||
VALUES ('obs1', 'Obs', 'SJC', ?, '2026-01-01T00:00:00Z', 10)`, recent)
|
||||
|
||||
// Route type 0 (TRANSPORT_FLOOD): header=0x04 (payload_type=1, route_type=0)
|
||||
// 4 transport bytes + path byte at offset 5.
|
||||
// Path byte 0x80 → hash_size bits = 10 → size 3
|
||||
// If bug is present, code reads byte 1 (0xAA) → hash_size bits = 10 → size 3 (coincidence)
|
||||
// Use path byte 0x40 (hash_size=2) and transport byte 0x01 at offset 1 (hash_size=1 if misread)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('100102030440aabb', 'tf_offset', ?, 0, 4, '{"pubKey":"aaaa000000000001","name":"TF-Node","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
// Route type 3 (TRANSPORT_DIRECT): header=0x13 (payload_type=4, route_type=3)
|
||||
// 4 transport bytes + path byte at offset 5.
|
||||
// Path byte 0xC1 → hash_size bits = 11 → size 4, hop_count = 1 (not zero-hop)
|
||||
// Byte 1 = 0x05 → hash_size bits = 00 → size 1 if misread
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1305060708C1bbcc', 'td_offset', ?, 3, 4, '{"pubKey":"aaaa000000000002","name":"TD-Node","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (2, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
// Route type 1 (FLOOD): header=0x11 (payload_type=4, route_type=1)
|
||||
// Path byte at offset 1. Path byte 0x80 → hash_size bits = 10 → size 3
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1180aabbccdd', 'flood_offset', ?, 1, 4, '{"pubKey":"aaaa000000000003","name":"Flood-Node","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (3, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
info := store.GetNodeHashSizeInfo()
|
||||
|
||||
// Transport flood node: path byte 0x40 → hash_size = 2
|
||||
if ni, ok := info["aaaa000000000001"]; !ok {
|
||||
t.Error("transport flood node missing from hash size info")
|
||||
} else if ni.HashSize != 2 {
|
||||
t.Errorf("transport flood node: want HashSize=2 (from path byte at offset 5), got %d", ni.HashSize)
|
||||
}
|
||||
|
||||
// Transport direct node: path byte 0xC1 → hash_size = 4
|
||||
if ni, ok := info["aaaa000000000002"]; !ok {
|
||||
t.Error("transport direct node missing from hash size info")
|
||||
} else if ni.HashSize != 4 {
|
||||
t.Errorf("transport direct node: want HashSize=4 (from path byte at offset 5), got %d", ni.HashSize)
|
||||
}
|
||||
|
||||
// Regular flood node: path byte 0x80 → hash_size = 3
|
||||
if ni, ok := info["aaaa000000000003"]; !ok {
|
||||
t.Error("regular flood node missing from hash size info")
|
||||
} else if ni.HashSize != 3 {
|
||||
t.Errorf("regular flood node: want HashSize=3 (from path byte at offset 1), got %d", ni.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHashSizeTransportDirectZeroHopSkipped verifies that RouteTransportDirect
|
||||
// zero-hop adverts are skipped (same as RouteDirect). Regression test for #744.
|
||||
func TestHashSizeTransportDirectZeroHopSkipped(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
|
||||
VALUES ('obs1', 'Obs', 'SJC', ?, '2026-01-01T00:00:00Z', 10)`, recent)
|
||||
|
||||
// RouteDirect (2) zero-hop: path byte 0x40 → hop_count=0, hash_size bits=01
|
||||
// Should be skipped (existing behavior)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1240aabbccdd', 'direct_zh', ?, 2, 4, '{"pubKey":"bbbb000000000001","name":"Direct-ZH","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
// RouteTransportDirect (3) zero-hop: 4 transport bytes + path byte 0x40 → hop_count=0
|
||||
// Should ALSO be skipped (this was the missing case)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('130102030440aabb', 'tdirect_zh', ?, 3, 4, '{"pubKey":"bbbb000000000002","name":"TDirect-ZH","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (2, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
// RouteDirect (2) non-zero-hop: path byte 0x41 → hop_count=1
|
||||
// Should NOT be skipped
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1241aabbccdd', 'direct_1h', ?, 2, 4, '{"pubKey":"bbbb000000000003","name":"Direct-1H","type":"ADVERT"}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (3, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
info := store.GetNodeHashSizeInfo()
|
||||
|
||||
// RouteDirect zero-hop should be absent
|
||||
if _, ok := info["bbbb000000000001"]; ok {
|
||||
t.Error("RouteDirect zero-hop advert should be skipped")
|
||||
}
|
||||
|
||||
// RouteTransportDirect zero-hop should also be absent
|
||||
if _, ok := info["bbbb000000000002"]; ok {
|
||||
t.Error("RouteTransportDirect zero-hop advert should be skipped")
|
||||
}
|
||||
|
||||
// RouteDirect non-zero-hop should be present with hash_size=2
|
||||
if ni, ok := info["bbbb000000000003"]; !ok {
|
||||
t.Error("RouteDirect non-zero-hop should be in hash size info")
|
||||
} else if ni.HashSize != 2 {
|
||||
t.Errorf("RouteDirect non-zero-hop: want HashSize=2, got %d", ni.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAnalyticsHashSizesZeroHopSkip verifies that computeAnalyticsHashSizes
|
||||
// does not overwrite a node's hash_size with a zero-hop advert's unreliable value.
|
||||
// Regression test for #744.
|
||||
func TestAnalyticsHashSizesZeroHopSkip(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
|
||||
VALUES ('obs1', 'Obs', 'SJC', ?, '2026-01-01T00:00:00Z', 10)`, recent)
|
||||
|
||||
pk := "cccc000000000001"
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role) VALUES (?, 'ZH-Analytics', 'repeater')`, pk)
|
||||
|
||||
decoded := `{"pubKey":"` + pk + `","name":"ZH-Analytics","type":"ADVERT"}`
|
||||
|
||||
// First: a flood advert with hashSize=2 (reliable, multi-hop)
|
||||
// header 0x11 = route_type 1 (flood), payload_type 4
|
||||
// pathByte 0x41 = hashSize bits 01 → size 2, hop_count 1
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1141aabbccdd', 'az_flood', ?, 1, 4, ?)`, recent, decoded)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -90, '["aabb"]', ?)`, recentEpoch)
|
||||
|
||||
// Second: a direct zero-hop advert with pathByte=0x00 → would give hashSize=1
|
||||
// header 0x12 = route_type 2 (direct), payload_type 4
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('1200aabbccdd', 'az_direct', ?, 2, 4, ?)`, recent, decoded)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (2, 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
result := store.GetAnalyticsHashSizes("")
|
||||
|
||||
// The node should appear in multiByteNodes (hashSize=2 from the flood advert)
|
||||
// If the zero-hop bug is present, hashSize would be 1 and the node would NOT
|
||||
// appear in multiByteNodes.
|
||||
multiByteNodes, ok := result["multiByteNodes"].([]map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected multiByteNodes slice in analytics hash sizes")
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, n := range multiByteNodes {
|
||||
if n["pubkey"] == pk {
|
||||
found = true
|
||||
if hs, ok := n["hashSize"].(int); ok && hs != 2 {
|
||||
t.Errorf("expected hashSize=2 from flood advert, got %d", hs)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("node should appear in multiByteNodes with hashSize=2; zero-hop advert should not overwrite to 1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleResolveHopsEdgeCases(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
@@ -4319,50 +4086,6 @@ func TestIndexByNodePreCheck(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestIndexByNodeResolvedPath tests that indexByNode only indexes decoded JSON pubkeys.
|
||||
// After #800, resolved_path entries are handled via the decode-window, not indexByNode.
|
||||
func TestIndexByNodeResolvedPath(t *testing.T) {
|
||||
store := &PacketStore{
|
||||
byNode: make(map[string][]*StoreTx),
|
||||
nodeHashes: make(map[string]map[string]bool),
|
||||
}
|
||||
|
||||
t.Run("decoded JSON pubkeys still indexed", func(t *testing.T) {
|
||||
pk := "aabb1122334455ff"
|
||||
tx := &StoreTx{
|
||||
Hash: "rp1",
|
||||
DecodedJSON: `{"pubKey":"` + pk + `"}`,
|
||||
}
|
||||
store.indexByNode(tx)
|
||||
if len(store.byNode[pk]) != 1 {
|
||||
t.Errorf("expected decoded pubkey indexed, got %d", len(store.byNode[pk]))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("resolved path pubkeys NOT indexed by indexByNode", func(t *testing.T) {
|
||||
// After #800, indexByNode only handles decoded JSON fields.
|
||||
// Resolved path pubkeys are handled by the decode-window.
|
||||
tx := &StoreTx{
|
||||
Hash: "rp2",
|
||||
DecodedJSON: `{"type":"CHAN","text":"hello"}`, // no pubKey fields
|
||||
}
|
||||
store.indexByNode(tx)
|
||||
// No new entries expected since there are no decoded pubkeys
|
||||
})
|
||||
|
||||
t.Run("dedup within decoded JSON", func(t *testing.T) {
|
||||
pk := "dedup0test0pk1234"
|
||||
tx := &StoreTx{
|
||||
Hash: "rp4",
|
||||
DecodedJSON: `{"pubKey":"` + pk + `","destPubKey":"` + pk + `"}`,
|
||||
}
|
||||
store.indexByNode(tx)
|
||||
if len(store.byNode[pk]) != 1 {
|
||||
t.Errorf("expected dedup to keep 1 entry, got %d", len(store.byNode[pk]))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// BenchmarkIndexByNode measures indexByNode performance with and without pubkey
|
||||
// fields to demonstrate the strings.Contains pre-check optimization.
|
||||
func BenchmarkIndexByNode(b *testing.B) {
|
||||
@@ -4616,53 +4339,3 @@ func TestHandleBatchObservations(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestIngestTraceBroadcastIncludesPath verifies that TRACE packet broadcasts
|
||||
// include decoded.path with hopsCompleted (#683).
|
||||
func TestIngestTraceBroadcastIncludesPath(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
initialMax := store.MaxTransmissionID()
|
||||
|
||||
// TRACE packet: header=0x25, path_byte=0x02 (2 SNR bytes), 2 SNR bytes,
|
||||
// then payload: tag(4) + authCode(4) + flags(1) + 4 hop hashes (1-byte each)
|
||||
traceHex := "2502AABB010000000200000000DEADBEEF"
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES (?, 'tracehash683test', ?, 1, 9, '')`, traceHex, now)
|
||||
newTxID := 0
|
||||
db.conn.QueryRow("SELECT MAX(id) FROM transmissions").Scan(&newTxID)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (?, 1, 5.0, -100, '["aa"]', ?)`, newTxID, time.Now().Unix())
|
||||
|
||||
broadcastMaps, _ := store.IngestNewFromDB(initialMax, 100)
|
||||
if len(broadcastMaps) < 1 {
|
||||
t.Fatal("expected >=1 broadcast maps")
|
||||
}
|
||||
|
||||
bm := broadcastMaps[0]
|
||||
decoded, ok := bm["decoded"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("broadcast map missing 'decoded'")
|
||||
}
|
||||
|
||||
pathObj, ok := decoded["path"]
|
||||
if !ok {
|
||||
t.Fatal("decoded missing 'path' for TRACE packet — hopsCompleted not delivered to frontend (#683)")
|
||||
}
|
||||
|
||||
// The path should be a Path struct with HopsCompleted = 2
|
||||
pathStruct, ok := pathObj.(Path)
|
||||
if !ok {
|
||||
t.Fatalf("expected Path struct, got %T", pathObj)
|
||||
}
|
||||
if pathStruct.HopsCompleted == nil {
|
||||
t.Fatal("path.HopsCompleted is nil for TRACE packet")
|
||||
}
|
||||
if *pathStruct.HopsCompleted != 2 {
|
||||
t.Errorf("expected hopsCompleted=2, got %d", *pathStruct.HopsCompleted)
|
||||
}
|
||||
}
|
||||
|
||||
+80
-352
@@ -8,7 +8,6 @@ import (
|
||||
"math"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
@@ -20,13 +19,6 @@ type DB struct {
|
||||
path string // filesystem path to the database file
|
||||
isV3 bool // v3 schema: observer_idx in observations (vs observer_id in v2)
|
||||
hasResolvedPath bool // observations table has resolved_path column
|
||||
hasObsRawHex bool // observations table has raw_hex column (#881)
|
||||
|
||||
// Channel list cache (60s TTL) — avoids repeated GROUP BY scans (#762)
|
||||
channelsCacheMu sync.Mutex
|
||||
channelsCacheKey string
|
||||
channelsCacheRes []map[string]interface{}
|
||||
channelsCacheExp time.Time
|
||||
}
|
||||
|
||||
// OpenDB opens a read-only SQLite connection with WAL mode.
|
||||
@@ -77,9 +69,6 @@ func (db *DB) detectSchema() {
|
||||
if colName == "resolved_path" {
|
||||
db.hasResolvedPath = true
|
||||
}
|
||||
if colName == "raw_hex" {
|
||||
db.hasObsRawHex = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -388,7 +377,6 @@ type PacketQuery struct {
|
||||
Until string
|
||||
Region string
|
||||
Node string
|
||||
Channel string // channel_hash filter (#812). Plain names like "#test"/"public" or "enc_<HEX>" for encrypted
|
||||
Order string // ASC or DESC
|
||||
ExpandObservations bool // when true, include observation sub-maps in txToMap output
|
||||
}
|
||||
@@ -625,11 +613,6 @@ func (db *DB) buildTransmissionWhere(q PacketQuery) ([]string, []interface{}) {
|
||||
where = append(where, "t.decoded_json LIKE ?")
|
||||
args = append(args, "%"+pk+"%")
|
||||
}
|
||||
if q.Channel != "" {
|
||||
// channel_hash column is indexed for payload_type = 5; filter is exact match.
|
||||
where = append(where, "t.channel_hash = ?")
|
||||
args = append(args, q.Channel)
|
||||
}
|
||||
if q.Observer != "" {
|
||||
ids := strings.Split(q.Observer, ",")
|
||||
placeholders := strings.Repeat("?,", len(ids))
|
||||
@@ -696,20 +679,6 @@ func (db *DB) GetPacketByHash(hash string) (map[string]interface{}, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetObservationsForHash returns all observations for the transmission with
|
||||
// the given content hash. Used as a fallback by the packet-detail handler
|
||||
// when the in-memory PacketStore has pruned the entry but the DB still has it.
|
||||
func (db *DB) GetObservationsForHash(hash string) []map[string]interface{} {
|
||||
var txID int
|
||||
err := db.conn.QueryRow("SELECT id FROM transmissions WHERE hash = ?",
|
||||
strings.ToLower(hash)).Scan(&txID)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
obsByTx := db.getObservationsForTransmissions([]int{txID})
|
||||
return obsByTx[txID]
|
||||
}
|
||||
|
||||
|
||||
// GetNodes returns filtered, paginated node list.
|
||||
func (db *DB) GetNodes(limit, offset int, role, search, before, lastHeard, sortBy, region string) ([]map[string]interface{}, int, map[string]int, error) {
|
||||
@@ -1184,219 +1153,69 @@ func (db *DB) GetTraces(hash string) ([]map[string]interface{}, error) {
|
||||
// Queries transmissions directly (not a VIEW) to avoid observation-level
|
||||
// duplicates that could cause stale lastMessage when an older message has
|
||||
// a later re-observation timestamp.
|
||||
func (db *DB) GetChannels(region ...string) ([]map[string]interface{}, error) {
|
||||
regionParam := ""
|
||||
if len(region) > 0 {
|
||||
regionParam = region[0]
|
||||
}
|
||||
|
||||
// Check cache (60s TTL)
|
||||
db.channelsCacheMu.Lock()
|
||||
if db.channelsCacheRes != nil && db.channelsCacheKey == regionParam && time.Now().Before(db.channelsCacheExp) {
|
||||
res := db.channelsCacheRes
|
||||
db.channelsCacheMu.Unlock()
|
||||
return res, nil
|
||||
}
|
||||
db.channelsCacheMu.Unlock()
|
||||
|
||||
regionCodes := normalizeRegionCodes(regionParam)
|
||||
|
||||
var querySQL string
|
||||
args := make([]interface{}, 0, len(regionCodes))
|
||||
|
||||
if len(regionCodes) > 0 {
|
||||
placeholders := make([]string, len(regionCodes))
|
||||
for i, code := range regionCodes {
|
||||
placeholders[i] = "?"
|
||||
args = append(args, code)
|
||||
}
|
||||
regionPlaceholder := strings.Join(placeholders, ",")
|
||||
if db.isV3 {
|
||||
querySQL = fmt.Sprintf(`SELECT t.channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(t.first_seen) AS last_activity,
|
||||
(SELECT t2.decoded_json FROM transmissions t2
|
||||
WHERE t2.channel_hash = t.channel_hash AND t2.payload_type = 5
|
||||
ORDER BY t2.first_seen DESC LIMIT 1) AS sample_json
|
||||
FROM transmissions t
|
||||
JOIN observations o ON o.transmission_id = t.id
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
|
||||
WHERE t.payload_type = 5
|
||||
AND t.channel_hash IS NOT NULL
|
||||
AND t.channel_hash NOT LIKE 'enc_%%'
|
||||
AND obs.rowid IS NOT NULL AND UPPER(TRIM(obs.iata)) IN (%s)
|
||||
GROUP BY t.channel_hash
|
||||
ORDER BY last_activity DESC`, regionPlaceholder)
|
||||
} else {
|
||||
querySQL = fmt.Sprintf(`SELECT t.channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(t.first_seen) AS last_activity,
|
||||
(SELECT t2.decoded_json FROM transmissions t2
|
||||
WHERE t2.channel_hash = t.channel_hash AND t2.payload_type = 5
|
||||
ORDER BY t2.first_seen DESC LIMIT 1) AS sample_json
|
||||
FROM transmissions t
|
||||
JOIN observations o ON o.transmission_id = t.id
|
||||
WHERE t.payload_type = 5
|
||||
AND t.channel_hash IS NOT NULL
|
||||
AND t.channel_hash NOT LIKE 'enc_%%'
|
||||
AND EXISTS (
|
||||
SELECT 1 FROM observers obs
|
||||
WHERE obs.id = o.observer_id
|
||||
AND UPPER(TRIM(obs.iata)) IN (%s)
|
||||
)
|
||||
GROUP BY t.channel_hash
|
||||
ORDER BY last_activity DESC`, regionPlaceholder)
|
||||
}
|
||||
} else {
|
||||
querySQL = `SELECT channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(first_seen) AS last_activity,
|
||||
(SELECT t2.decoded_json FROM transmissions t2
|
||||
WHERE t2.channel_hash = t.channel_hash AND t2.payload_type = 5
|
||||
ORDER BY t2.first_seen DESC LIMIT 1) AS sample_json
|
||||
FROM transmissions t
|
||||
WHERE payload_type = 5
|
||||
AND channel_hash IS NOT NULL
|
||||
AND channel_hash NOT LIKE 'enc_%%'
|
||||
GROUP BY channel_hash
|
||||
ORDER BY last_activity DESC`
|
||||
}
|
||||
|
||||
rows, err := db.conn.Query(querySQL, args...)
|
||||
func (db *DB) GetChannels() ([]map[string]interface{}, error) {
|
||||
rows, err := db.conn.Query(`SELECT decoded_json, first_seen FROM transmissions WHERE payload_type = 5 ORDER BY first_seen ASC`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
channels := make([]map[string]interface{}, 0)
|
||||
channelMap := map[string]map[string]interface{}{}
|
||||
for rows.Next() {
|
||||
var chHash, lastActivity, sampleJSON sql.NullString
|
||||
var msgCount int
|
||||
if err := rows.Scan(&chHash, &msgCount, &lastActivity, &sampleJSON); err != nil {
|
||||
var dj, fs sql.NullString
|
||||
rows.Scan(&dj, &fs)
|
||||
if !dj.Valid {
|
||||
continue
|
||||
}
|
||||
channelName := nullStr(chHash)
|
||||
var decoded map[string]interface{}
|
||||
if json.Unmarshal([]byte(dj.String), &decoded) != nil {
|
||||
continue
|
||||
}
|
||||
dtype, _ := decoded["type"].(string)
|
||||
if dtype != "CHAN" {
|
||||
continue
|
||||
}
|
||||
// Filter out garbage-decrypted channel names/messages (pre-#197 data still in DB)
|
||||
chanStr, _ := decoded["channel"].(string)
|
||||
textStr, _ := decoded["text"].(string)
|
||||
if hasGarbageChars(chanStr) || hasGarbageChars(textStr) {
|
||||
continue
|
||||
}
|
||||
channelName, _ := decoded["channel"].(string)
|
||||
if channelName == "" {
|
||||
continue
|
||||
channelName = "unknown"
|
||||
}
|
||||
key := channelName
|
||||
|
||||
var lastMessage, lastSender interface{}
|
||||
if sampleJSON.Valid {
|
||||
var decoded map[string]interface{}
|
||||
if json.Unmarshal([]byte(sampleJSON.String), &decoded) == nil {
|
||||
if text, ok := decoded["text"].(string); ok && text != "" {
|
||||
idx := strings.Index(text, ": ")
|
||||
if idx > 0 {
|
||||
lastMessage = text[idx+2:]
|
||||
} else {
|
||||
lastMessage = text
|
||||
}
|
||||
if sender, ok := decoded["sender"].(string); ok {
|
||||
lastSender = sender
|
||||
}
|
||||
}
|
||||
ch, exists := channelMap[key]
|
||||
if !exists {
|
||||
ch = map[string]interface{}{
|
||||
"hash": key, "name": channelName,
|
||||
"lastMessage": nil, "lastSender": nil,
|
||||
"messageCount": 0, "lastActivity": nullStr(fs),
|
||||
}
|
||||
channelMap[key] = ch
|
||||
}
|
||||
ch["messageCount"] = ch["messageCount"].(int) + 1
|
||||
if fs.Valid {
|
||||
ch["lastActivity"] = fs.String
|
||||
}
|
||||
if text, ok := decoded["text"].(string); ok && text != "" {
|
||||
idx := strings.Index(text, ": ")
|
||||
if idx > 0 {
|
||||
ch["lastMessage"] = text[idx+2:]
|
||||
} else {
|
||||
ch["lastMessage"] = text
|
||||
}
|
||||
if sender, ok := decoded["sender"].(string); ok {
|
||||
ch["lastSender"] = sender
|
||||
}
|
||||
}
|
||||
|
||||
channels = append(channels, map[string]interface{}{
|
||||
"hash": channelName, "name": channelName,
|
||||
"lastMessage": lastMessage, "lastSender": lastSender,
|
||||
"messageCount": msgCount, "lastActivity": nullStr(lastActivity),
|
||||
})
|
||||
}
|
||||
|
||||
// Store in cache (60s TTL)
|
||||
db.channelsCacheMu.Lock()
|
||||
db.channelsCacheRes = channels
|
||||
db.channelsCacheKey = regionParam
|
||||
db.channelsCacheExp = time.Now().Add(60 * time.Second)
|
||||
db.channelsCacheMu.Unlock()
|
||||
|
||||
return channels, nil
|
||||
}
|
||||
|
||||
// GetEncryptedChannels returns channels where all messages are undecryptable (no key).
|
||||
// Uses channel_hash column (prefixed with 'enc_') for fast grouped queries.
|
||||
func (db *DB) GetEncryptedChannels(region ...string) ([]map[string]interface{}, error) {
|
||||
regionParam := ""
|
||||
if len(region) > 0 {
|
||||
regionParam = region[0]
|
||||
}
|
||||
regionCodes := normalizeRegionCodes(regionParam)
|
||||
|
||||
var querySQL string
|
||||
args := make([]interface{}, 0, len(regionCodes))
|
||||
|
||||
if len(regionCodes) > 0 {
|
||||
placeholders := make([]string, len(regionCodes))
|
||||
for i, code := range regionCodes {
|
||||
placeholders[i] = "?"
|
||||
args = append(args, code)
|
||||
}
|
||||
regionPlaceholder := strings.Join(placeholders, ",")
|
||||
if db.isV3 {
|
||||
querySQL = fmt.Sprintf(`SELECT t.channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(t.first_seen) AS last_activity
|
||||
FROM transmissions t
|
||||
JOIN observations o ON o.transmission_id = t.id
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
|
||||
WHERE t.payload_type = 5
|
||||
AND t.channel_hash LIKE 'enc_%%'
|
||||
AND obs.rowid IS NOT NULL AND UPPER(TRIM(obs.iata)) IN (%s)
|
||||
GROUP BY t.channel_hash
|
||||
ORDER BY last_activity DESC`, regionPlaceholder)
|
||||
} else {
|
||||
querySQL = fmt.Sprintf(`SELECT t.channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(t.first_seen) AS last_activity
|
||||
FROM transmissions t
|
||||
JOIN observations o ON o.transmission_id = t.id
|
||||
WHERE t.payload_type = 5
|
||||
AND t.channel_hash LIKE 'enc_%%'
|
||||
AND EXISTS (
|
||||
SELECT 1 FROM observers obs
|
||||
WHERE obs.id = o.observer_id
|
||||
AND UPPER(TRIM(obs.iata)) IN (%s)
|
||||
)
|
||||
GROUP BY t.channel_hash
|
||||
ORDER BY last_activity DESC`, regionPlaceholder)
|
||||
}
|
||||
} else {
|
||||
querySQL = `SELECT channel_hash,
|
||||
COUNT(*) AS msg_count,
|
||||
MAX(first_seen) AS last_activity
|
||||
FROM transmissions
|
||||
WHERE payload_type = 5
|
||||
AND channel_hash LIKE 'enc_%%'
|
||||
GROUP BY channel_hash
|
||||
ORDER BY last_activity DESC`
|
||||
}
|
||||
|
||||
rows, err := db.conn.Query(querySQL, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
channels := make([]map[string]interface{}, 0)
|
||||
for rows.Next() {
|
||||
var chHash, lastActivity sql.NullString
|
||||
var msgCount int
|
||||
if err := rows.Scan(&chHash, &msgCount, &lastActivity); err != nil {
|
||||
continue
|
||||
}
|
||||
fullHash := nullStrVal(chHash) // e.g. "enc_3A"
|
||||
hexPart := strings.TrimPrefix(fullHash, "enc_")
|
||||
channels = append(channels, map[string]interface{}{
|
||||
"hash": fullHash,
|
||||
"name": "Encrypted (0x" + hexPart + ")",
|
||||
"lastMessage": nil,
|
||||
"lastSender": nil,
|
||||
"messageCount": msgCount,
|
||||
"lastActivity": nullStr(lastActivity),
|
||||
"encrypted": true,
|
||||
})
|
||||
channels := make([]map[string]interface{}, 0, len(channelMap))
|
||||
for _, ch := range channelMap {
|
||||
channels = append(channels, ch)
|
||||
}
|
||||
return channels, nil
|
||||
}
|
||||
@@ -1425,16 +1244,15 @@ func (db *DB) GetChannelMessages(channelHash string, limit, offset int, region .
|
||||
regionPlaceholders = strings.Join(placeholders, ",")
|
||||
}
|
||||
|
||||
// Fetch messages with channel_hash filter (pagination applied in Go after dedup)
|
||||
var querySQL string
|
||||
args := []interface{}{channelHash}
|
||||
args := make([]interface{}, 0, len(regionArgs))
|
||||
if db.isV3 {
|
||||
querySQL = `SELECT o.id, t.hash, t.decoded_json, t.first_seen,
|
||||
obs.id, obs.name, o.snr, o.path_json
|
||||
FROM observations o
|
||||
JOIN transmissions t ON t.id = o.transmission_id
|
||||
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
|
||||
WHERE t.channel_hash = ? AND t.payload_type = 5`
|
||||
WHERE t.payload_type = 5`
|
||||
if len(regionCodes) > 0 {
|
||||
querySQL += fmt.Sprintf(" AND obs.rowid IS NOT NULL AND UPPER(TRIM(obs.iata)) IN (%s)", regionPlaceholders)
|
||||
args = append(args, regionArgs...)
|
||||
@@ -1446,11 +1264,14 @@ func (db *DB) GetChannelMessages(channelHash string, limit, offset int, region .
|
||||
o.observer_id, o.observer_name, o.snr, o.path_json
|
||||
FROM observations o
|
||||
JOIN transmissions t ON t.id = o.transmission_id
|
||||
WHERE t.channel_hash = ? AND t.payload_type = 5`
|
||||
WHERE t.payload_type = 5`
|
||||
if len(regionCodes) > 0 {
|
||||
querySQL += fmt.Sprintf(` AND EXISTS (
|
||||
SELECT 1 FROM observers obs WHERE obs.id = o.observer_id
|
||||
AND UPPER(TRIM(obs.iata)) IN (%s))`, regionPlaceholders)
|
||||
SELECT 1
|
||||
FROM observers obs
|
||||
WHERE obs.id = o.observer_id
|
||||
AND UPPER(TRIM(obs.iata)) IN (%s)
|
||||
)`, regionPlaceholders)
|
||||
args = append(args, regionArgs...)
|
||||
}
|
||||
querySQL += `
|
||||
@@ -1482,6 +1303,17 @@ func (db *DB) GetChannelMessages(channelHash string, limit, offset int, region .
|
||||
if json.Unmarshal([]byte(dj.String), &decoded) != nil {
|
||||
continue
|
||||
}
|
||||
dtype, _ := decoded["type"].(string)
|
||||
if dtype != "CHAN" {
|
||||
continue
|
||||
}
|
||||
ch, _ := decoded["channel"].(string)
|
||||
if ch == "" {
|
||||
ch = "unknown"
|
||||
}
|
||||
if ch != channelHash {
|
||||
continue
|
||||
}
|
||||
|
||||
text, _ := decoded["text"].(string)
|
||||
sender, _ := decoded["sender"].(string)
|
||||
@@ -1541,18 +1373,18 @@ func (db *DB) GetChannelMessages(channelHash string, limit, offset int, region .
|
||||
}
|
||||
}
|
||||
|
||||
// Return latest messages (tail) with pagination
|
||||
msgTotal := len(msgOrder)
|
||||
start := msgTotal - limit - offset
|
||||
total := len(msgOrder)
|
||||
// Return latest messages (tail)
|
||||
start := total - limit - offset
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
end := msgTotal - offset
|
||||
end := total - offset
|
||||
if end < 0 {
|
||||
end = 0
|
||||
}
|
||||
if end > msgTotal {
|
||||
end = msgTotal
|
||||
if end > total {
|
||||
end = total
|
||||
}
|
||||
|
||||
messages := make([]map[string]interface{}, 0)
|
||||
@@ -1563,7 +1395,7 @@ func (db *DB) GetChannelMessages(channelHash string, limit, offset int, region .
|
||||
messages = append(messages, m.Data)
|
||||
}
|
||||
|
||||
return messages, msgTotal, nil
|
||||
return messages, total, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1872,10 +1704,12 @@ func nullInt(ni sql.NullInt64) interface{} {
|
||||
// Returns the number of transmissions deleted.
|
||||
// Opens a separate read-write connection since the main connection is read-only.
|
||||
func (db *DB) PruneOldPackets(days int) (int64, error) {
|
||||
rw, err := openRW(db.path)
|
||||
dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=10000", db.path)
|
||||
rw, err := sql.Open("sqlite", dsn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rw.SetMaxOpenConns(1)
|
||||
defer rw.Close()
|
||||
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -days).Format(time.RFC3339)
|
||||
@@ -2219,10 +2053,12 @@ func (db *DB) GetMetricsSummary(since string) ([]MetricsSummaryRow, error) {
|
||||
|
||||
// PruneOldMetrics deletes observer_metrics rows older than retentionDays.
|
||||
func (db *DB) PruneOldMetrics(retentionDays int) (int64, error) {
|
||||
rw, err := openRW(db.path)
|
||||
dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=10000", db.path)
|
||||
rw, err := sql.Open("sqlite", dsn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rw.SetMaxOpenConns(1)
|
||||
defer rw.Close()
|
||||
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format(time.RFC3339)
|
||||
@@ -2236,111 +2072,3 @@ func (db *DB) PruneOldMetrics(retentionDays int) (int64, error) {
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// RemoveStaleObservers marks observers that have not actively sent data in observerDays
|
||||
// as inactive (soft-delete). This preserves JOIN integrity for observations.observer_idx
|
||||
// and observer_metrics.observer_id — historical data still references the correct observer.
|
||||
// An observer must actively send data to stay listed — being seen by another node does not count.
|
||||
// observerDays <= -1 means never remove (keep forever).
|
||||
func (db *DB) RemoveStaleObservers(observerDays int) (int64, error) {
|
||||
if observerDays <= -1 {
|
||||
return 0, nil // keep forever
|
||||
}
|
||||
rw, err := openRW(db.path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer rw.Close()
|
||||
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -observerDays).Format(time.RFC3339)
|
||||
res, err := rw.Exec(`UPDATE observers SET inactive = 1 WHERE last_seen < ? AND (inactive IS NULL OR inactive = 0)`, cutoff)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, _ := res.RowsAffected()
|
||||
if n > 0 {
|
||||
// Clean up orphaned metrics for now-inactive observers
|
||||
rw.Exec(`DELETE FROM observer_metrics WHERE observer_id IN (SELECT id FROM observers WHERE inactive = 1)`)
|
||||
log.Printf("[observers] Marked %d observer(s) as inactive (not seen in %d days)", n, observerDays)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// TouchNodeLastSeen updates last_seen for a node identified by full public key.
|
||||
// Only updates if the new timestamp is newer than the existing value (or NULL).
|
||||
// Returns nil even if no rows are affected (node doesn't exist).
|
||||
func (db *DB) TouchNodeLastSeen(pubkey string, timestamp string) error {
|
||||
_, err := db.conn.Exec(
|
||||
"UPDATE nodes SET last_seen = ? WHERE public_key = ? AND (last_seen IS NULL OR last_seen < ?)",
|
||||
timestamp, pubkey, timestamp,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDroppedPackets returns recently dropped packets, newest first.
|
||||
func (db *DB) GetDroppedPackets(limit int, observerID, nodePubkey string) ([]map[string]interface{}, error) {
|
||||
if limit <= 0 || limit > 500 {
|
||||
limit = 100
|
||||
}
|
||||
query := `SELECT id, hash, raw_hex, reason, observer_id, observer_name, node_pubkey, node_name, dropped_at FROM dropped_packets`
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
if observerID != "" {
|
||||
conditions = append(conditions, "observer_id = ?")
|
||||
args = append(args, observerID)
|
||||
}
|
||||
if nodePubkey != "" {
|
||||
conditions = append(conditions, "node_pubkey = ?")
|
||||
args = append(args, nodePubkey)
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
query += " WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
query += " ORDER BY dropped_at DESC LIMIT ?"
|
||||
args = append(args, limit)
|
||||
|
||||
rows, err := db.conn.Query(query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var results []map[string]interface{}
|
||||
for rows.Next() {
|
||||
var id int
|
||||
var hash, rawHex, reason, obsID, obsName, pubkey, name, droppedAt sql.NullString
|
||||
if err := rows.Scan(&id, &hash, &rawHex, &reason, &obsID, &obsName, &pubkey, &name, &droppedAt); err != nil {
|
||||
continue
|
||||
}
|
||||
row := map[string]interface{}{
|
||||
"id": id,
|
||||
"hash": nullStr(hash),
|
||||
"reason": nullStr(reason),
|
||||
"observer_id": nullStr(obsID),
|
||||
"observer_name": nullStr(obsName),
|
||||
"node_pubkey": nullStr(pubkey),
|
||||
"node_name": nullStr(name),
|
||||
"dropped_at": nullStr(droppedAt),
|
||||
}
|
||||
// Only include raw_hex if explicitly requested (it's large)
|
||||
if rawHex.Valid {
|
||||
row["raw_hex"] = rawHex.String
|
||||
}
|
||||
results = append(results, row)
|
||||
}
|
||||
if results == nil {
|
||||
results = []map[string]interface{}{}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetSignatureDropCount returns the total number of dropped packets.
|
||||
func (db *DB) GetSignatureDropCount() int64 {
|
||||
var count int64
|
||||
// Table may not exist yet if ingestor hasn't run the migration
|
||||
err := db.conn.QueryRow("SELECT COUNT(*) FROM dropped_packets").Scan(&count)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
+33
-150
@@ -60,7 +60,6 @@ func setupTestDB(t *testing.T) *DB {
|
||||
payload_type INTEGER,
|
||||
payload_version INTEGER,
|
||||
decoded_json TEXT,
|
||||
channel_hash TEXT DEFAULT NULL,
|
||||
created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
@@ -73,9 +72,7 @@ func setupTestDB(t *testing.T) *DB {
|
||||
rssi REAL,
|
||||
score INTEGER,
|
||||
path_json TEXT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
resolved_path TEXT,
|
||||
raw_hex TEXT
|
||||
timestamp INTEGER NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS observer_metrics (
|
||||
@@ -98,7 +95,7 @@ func setupTestDB(t *testing.T) *DB {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return &DB{conn: conn, isV3: true, hasResolvedPath: true}
|
||||
return &DB{conn: conn, isV3: true}
|
||||
}
|
||||
|
||||
func seedTestData(t *testing.T, db *DB) {
|
||||
@@ -126,24 +123,23 @@ func seedTestData(t *testing.T, db *DB) {
|
||||
VALUES ('1122334455667788', 'TestRoom', 'room', 37.4, -121.9, ?, '2026-01-01T00:00:00Z', 5)`, twoDaysAgo)
|
||||
|
||||
// Seed transmissions
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('AABB', 'abc123def4567890', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000000,"timestampISO":"2023-11-14T22:13:20.000Z","signature":"abcdef","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}', '#test')`, recent)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('CCDD', '1234567890abcdef', ?, 1, 5, '{"type":"CHAN","channel":"#test","text":"Hello: World","sender":"TestUser"}', '#test')`, yesterday)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AABB', 'abc123def4567890', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000000,"timestampISO":"2023-11-14T22:13:20.000Z","signature":"abcdef","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}')`, recent)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('CCDD', '1234567890abcdef', ?, 1, 5, '{"type":"CHAN","channel":"#test","text":"Hello: World","sender":"TestUser"}')`, yesterday)
|
||||
// Second ADVERT for same node with different hash_size (raw_hex byte 0x1F → hs=1 vs 0xBB → hs=3)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA1F', 'def456abc1230099', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000100,"timestampISO":"2023-11-14T22:14:40.000Z","signature":"fedcba","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}')`, yesterday)
|
||||
|
||||
// Seed observations (use unix timestamps)
|
||||
// resolved_path contains full pubkeys parallel to path_json hops
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path)
|
||||
VALUES (1, 1, 12.5, -90, '["aa","bb"]', ?, '["aabbccdd11223344","eeff00112233aabb"]')`, recentEpoch)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path)
|
||||
VALUES (1, 2, 8.0, -95, '["aa"]', ?, '["aabbccdd11223344"]')`, recentEpoch-100)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 12.5, -90, '["aa","bb"]', ?)`, recentEpoch)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 2, 8.0, -95, '["aa"]', ?)`, recentEpoch-100)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (2, 1, 15.0, -85, '[]', ?)`, yesterdayEpoch)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path)
|
||||
VALUES (3, 1, 10.0, -92, '["cc"]', ?, '["1122334455667788"]')`, yesterdayEpoch)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (3, 1, 10.0, -92, '["cc"]', ?)`, yesterdayEpoch)
|
||||
}
|
||||
|
||||
func TestGetStats(t *testing.T) {
|
||||
@@ -737,12 +733,12 @@ func TestGetChannelMessagesRegionFiltering(t *testing.T) {
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs1', 'Observer One', 'SJC')`)
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs2', 'Observer Two', ' sfo ')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA', 'chanregion0001', ?, 1, 5,
|
||||
'{"type":"CHAN","channel":"#region","text":"SjcUser: One","sender":"SjcUser"}', '#region')`, ts1)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
'{"type":"CHAN","channel":"#region","text":"SjcUser: One","sender":"SjcUser"}')`, ts1)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('BB', 'chanregion0002', ?, 1, 5,
|
||||
'{"type":"CHAN","channel":"#region","text":"SfoUser: Two","sender":"SfoUser"}', '#region')`, ts2)
|
||||
'{"type":"CHAN","channel":"#region","text":"SfoUser: Two","sender":"SfoUser"}')`, ts2)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 10.0, -90, '[]', ?)`, epoch1)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
@@ -1121,7 +1117,6 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
payload_type INTEGER,
|
||||
payload_version INTEGER,
|
||||
decoded_json TEXT,
|
||||
channel_hash TEXT DEFAULT NULL,
|
||||
created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
@@ -1135,8 +1130,7 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
rssi REAL,
|
||||
score INTEGER,
|
||||
path_json TEXT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
raw_hex TEXT
|
||||
timestamp INTEGER NOT NULL
|
||||
);
|
||||
`
|
||||
if _, err := conn.Exec(schema); err != nil {
|
||||
@@ -1206,12 +1200,12 @@ func TestGetChannelMessagesDedup(t *testing.T) {
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs2', 'Observer Two', 'SFO')`)
|
||||
|
||||
// Insert two transmissions with same hash to test dedup
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA', 'chanmsg00000001', '2026-01-15T10:00:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#general","text":"User1: Hello","sender":"User1"}', '#general')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
'{"type":"CHAN","channel":"#general","text":"User1: Hello","sender":"User1"}')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('BB', 'chanmsg00000002', '2026-01-15T10:01:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#general","text":"User2: World","sender":"User2"}', '#general')`)
|
||||
'{"type":"CHAN","channel":"#general","text":"User2: World","sender":"User2"}')`)
|
||||
|
||||
// Observations: first msg seen by two observers (dedup), second by one
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
@@ -1255,9 +1249,9 @@ func TestGetChannelMessagesNoSender(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs1', 'Observer One', 'SJC')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('CC', 'chanmsg00000003', '2026-01-15T10:02:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#noname","text":"plain text no colon"}', '#noname')`)
|
||||
'{"type":"CHAN","channel":"#noname","text":"plain text no colon"}')`)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, 1, 12.0, -90, null, 1736935300)`)
|
||||
|
||||
@@ -1360,9 +1354,9 @@ func TestGetChannelMessagesObserverFallback(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
// Observer with ID but no name entry (observer_idx won't match)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA', 'chanmsg00000004', '2026-01-15T10:00:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#obs","text":"Sender: Test","sender":"Sender"}', '#obs')`)
|
||||
'{"type":"CHAN","channel":"#obs","text":"Sender: Test","sender":"Sender"}')`)
|
||||
// Observation without observer (observer_idx = NULL)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, NULL, 12.0, -90, null, 1736935200)`)
|
||||
@@ -1384,12 +1378,12 @@ func TestGetChannelsMultiple(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs1', 'Observer', 'SJC')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA', 'chan1hash', '2026-01-15T10:00:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#alpha","text":"Alice: Hello","sender":"Alice"}', '#alpha')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
'{"type":"CHAN","channel":"#alpha","text":"Alice: Hello","sender":"Alice"}')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('BB', 'chan2hash', '2026-01-15T10:01:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#beta","text":"Bob: World","sender":"Bob"}', '#beta')`)
|
||||
'{"type":"CHAN","channel":"#beta","text":"Bob: World","sender":"Bob"}')`)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('CC', 'chan3hash', '2026-01-15T10:02:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"","text":"No channel"}')`)
|
||||
@@ -1472,13 +1466,13 @@ func TestGetChannelsStaleMessage(t *testing.T) {
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs2', 'Observer2', 'SFO')`)
|
||||
|
||||
// Older message (first_seen T1)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('AA', 'oldhash1', '2026-01-15T10:00:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#test","text":"Alice: Old message","sender":"Alice"}', '#test')`)
|
||||
'{"type":"CHAN","channel":"#test","text":"Alice: Old message","sender":"Alice"}')`)
|
||||
// Newer message (first_seen T2 > T1)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('BB', 'newhash2', '2026-01-15T10:05:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#test","text":"Bob: New message","sender":"Bob"}', '#test')`)
|
||||
'{"type":"CHAN","channel":"#test","text":"Bob: New message","sender":"Bob"}')`)
|
||||
|
||||
// Observations: older message re-observed AFTER newer message (stale scenario)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, timestamp)
|
||||
@@ -1508,61 +1502,6 @@ func TestGetChannelsStaleMessage(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChannelsRegionFiltering(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs1', 'Observer1', 'SJC')`)
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata) VALUES ('obs2', 'Observer2', 'SFO')`)
|
||||
|
||||
// Channel message seen only in SJC
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('AA', 'hash1', '2026-01-15T10:00:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#sjc-only","text":"Alice: Hello SJC","sender":"Alice"}', '#sjc-only')`)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, timestamp)
|
||||
VALUES (1, 1, 12.0, -90, 1736935200)`)
|
||||
|
||||
// Channel message seen only in SFO
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('BB', 'hash2', '2026-01-15T10:05:00Z', 1, 5,
|
||||
'{"type":"CHAN","channel":"#sfo-only","text":"Bob: Hello SFO","sender":"Bob"}', '#sfo-only')`)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, timestamp)
|
||||
VALUES (2, 2, 14.0, -88, 1736935500)`)
|
||||
|
||||
// No region filter — both channels
|
||||
all, err := db.GetChannels()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(all) != 2 {
|
||||
t.Fatalf("expected 2 channels without region filter, got %d", len(all))
|
||||
}
|
||||
|
||||
// Filter SJC — only #sjc-only
|
||||
sjc, err := db.GetChannels("SJC")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(sjc) != 1 {
|
||||
t.Fatalf("expected 1 channel for SJC, got %d", len(sjc))
|
||||
}
|
||||
if sjc[0]["name"] != "#sjc-only" {
|
||||
t.Errorf("expected channel '#sjc-only', got %q", sjc[0]["name"])
|
||||
}
|
||||
|
||||
// Filter SFO — only #sfo-only
|
||||
sfo, err := db.GetChannels("SFO")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(sfo) != 1 {
|
||||
t.Fatalf("expected 1 channel for SFO, got %d", len(sfo))
|
||||
}
|
||||
if sfo[0]["name"] != "#sfo-only" {
|
||||
t.Errorf("expected channel '#sfo-only', got %q", sfo[0]["name"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeTelemetryFields(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
@@ -1977,59 +1916,3 @@ func TestParseWindowDuration(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPerObservationRawHexEnrich verifies enrichObs returns per-observation raw_hex
|
||||
// when available, falling back to transmission raw_hex when NULL (#881).
|
||||
func TestPerObservationRawHexEnrich(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
// Insert observers
|
||||
db.conn.Exec(`INSERT INTO observers (id, name) VALUES ('obs-a', 'Observer A')`)
|
||||
db.conn.Exec(`INSERT INTO observers (id, name) VALUES ('obs-b', 'Observer B')`)
|
||||
|
||||
var rowA, rowB int64
|
||||
db.conn.QueryRow(`SELECT rowid FROM observers WHERE id='obs-a'`).Scan(&rowA)
|
||||
db.conn.QueryRow(`SELECT rowid FROM observers WHERE id='obs-b'`).Scan(&rowB)
|
||||
|
||||
// Insert transmission with raw_hex
|
||||
txHex := "deadbeef"
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen) VALUES (?, 'hash1', '2026-04-21T10:00:00Z')`, txHex)
|
||||
|
||||
// Insert two observations: A has its own raw_hex, B has NULL (historical)
|
||||
obsAHex := "c0ffee01"
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, raw_hex)
|
||||
VALUES (1, ?, -5.0, -90.0, '[]', 1745236800, ?)`, rowA, obsAHex)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (1, ?, -3.0, -85.0, '["aabb"]', 1745236801)`, rowB)
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatalf("store load: %v", err)
|
||||
}
|
||||
|
||||
tx := store.byHash["hash1"]
|
||||
if tx == nil {
|
||||
t.Fatal("transmission not loaded")
|
||||
}
|
||||
if len(tx.Observations) < 2 {
|
||||
t.Fatalf("expected 2 observations, got %d", len(tx.Observations))
|
||||
}
|
||||
|
||||
// Check enriched observations
|
||||
for _, obs := range tx.Observations {
|
||||
m := store.enrichObs(obs)
|
||||
rh, _ := m["raw_hex"].(string)
|
||||
if obs.RawHex != "" {
|
||||
// Observer A: should get per-observation raw_hex
|
||||
if rh != obsAHex {
|
||||
t.Errorf("obs with own raw_hex: got %q, want %q", rh, obsAHex)
|
||||
}
|
||||
} else {
|
||||
// Observer B: should fall back to transmission raw_hex
|
||||
if rh != txHex {
|
||||
t.Errorf("obs without raw_hex: got %q, want %q (tx fallback)", rh, txHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// createFreshIngestorDB creates a SQLite DB using the ingestor's applySchema logic
|
||||
// (simulated here) with auto_vacuum=INCREMENTAL set before tables.
|
||||
func createFreshDBWithAutoVacuum(t *testing.T, path string) *sql.DB {
|
||||
t.Helper()
|
||||
// auto_vacuum must be set via DSN before journal_mode creates the DB file
|
||||
db, err := sql.Open("sqlite", path+"?_pragma=auto_vacuum(INCREMENTAL)&_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.SetMaxOpenConns(1)
|
||||
|
||||
// Create minimal schema
|
||||
_, err = db.Exec(`
|
||||
CREATE TABLE transmissions (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
raw_hex TEXT NOT NULL,
|
||||
hash TEXT NOT NULL UNIQUE,
|
||||
first_seen TEXT NOT NULL,
|
||||
route_type INTEGER,
|
||||
payload_type INTEGER,
|
||||
payload_version INTEGER,
|
||||
decoded_json TEXT,
|
||||
created_at TEXT DEFAULT (datetime('now')),
|
||||
channel_hash TEXT
|
||||
);
|
||||
CREATE TABLE observations (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
|
||||
observer_idx INTEGER,
|
||||
direction TEXT,
|
||||
snr REAL,
|
||||
rssi REAL,
|
||||
score INTEGER,
|
||||
path_json TEXT,
|
||||
timestamp INTEGER NOT NULL
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
func TestNewDBHasIncrementalAutoVacuum(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "test.db")
|
||||
|
||||
db := createFreshDBWithAutoVacuum(t, path)
|
||||
defer db.Close()
|
||||
|
||||
var autoVacuum int
|
||||
if err := db.QueryRow("PRAGMA auto_vacuum").Scan(&autoVacuum); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if autoVacuum != 2 {
|
||||
t.Fatalf("expected auto_vacuum=2 (INCREMENTAL), got %d", autoVacuum)
|
||||
}
|
||||
}
|
||||
|
||||
// TestExistingDBHasAutoVacuumNone verifies that a database created without
// the auto_vacuum pragma (simulating a pre-migration DB) reports mode 0 (NONE).
func TestExistingDBHasAutoVacuumNone(t *testing.T) {
	dbPath := filepath.Join(t.TempDir(), "test.db")

	// Create DB WITHOUT setting auto_vacuum (simulates old DB)
	conn, err := sql.Open("sqlite", dbPath+"?_pragma=journal_mode(WAL)")
	if err != nil {
		t.Fatal(err)
	}
	conn.SetMaxOpenConns(1)
	if _, err := conn.Exec("CREATE TABLE dummy (id INTEGER PRIMARY KEY)"); err != nil {
		t.Fatal(err)
	}

	var mode int
	if err := conn.QueryRow("PRAGMA auto_vacuum").Scan(&mode); err != nil {
		t.Fatal(err)
	}
	conn.Close()

	if mode != 0 {
		t.Fatalf("expected auto_vacuum=0 (NONE) for old DB, got %d", mode)
	}
}
|
||||
|
||||
func TestVacuumOnStartupMigratesDB(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "test.db")
|
||||
|
||||
// Create DB without auto_vacuum (old DB)
|
||||
db, err := sql.Open("sqlite", path+"?_pragma=journal_mode(WAL)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.SetMaxOpenConns(1)
|
||||
_, err = db.Exec("CREATE TABLE dummy (id INTEGER PRIMARY KEY)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var before int
|
||||
db.QueryRow("PRAGMA auto_vacuum").Scan(&before)
|
||||
if before != 0 {
|
||||
t.Fatalf("precondition: expected auto_vacuum=0, got %d", before)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
// Simulate vacuumOnStartup migration using openRW
|
||||
rw, err := openRW(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := rw.Exec("PRAGMA auto_vacuum = INCREMENTAL"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := rw.Exec("VACUUM"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rw.Close()
|
||||
|
||||
// Verify migration
|
||||
db2, err := sql.Open("sqlite", path+"?mode=ro")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer db2.Close()
|
||||
|
||||
var after int
|
||||
if err := db2.QueryRow("PRAGMA auto_vacuum").Scan(&after); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if after != 2 {
|
||||
t.Fatalf("expected auto_vacuum=2 after VACUUM migration, got %d", after)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementalVacuumReducesFreelist(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "test.db")
|
||||
|
||||
db := createFreshDBWithAutoVacuum(t, path)
|
||||
|
||||
// Insert a bunch of data
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
for i := 0; i < 500; i++ {
|
||||
_, err := db.Exec(
|
||||
"INSERT INTO transmissions (raw_hex, hash, first_seen) VALUES (?, ?, ?)",
|
||||
strings.Repeat("AA", 200), // ~400 bytes each
|
||||
"hash_"+string(rune('A'+i%26))+string(rune('0'+i/26)),
|
||||
now,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get file size before delete
|
||||
db.Close()
|
||||
infoBefore, _ := os.Stat(path)
|
||||
sizeBefore := infoBefore.Size()
|
||||
|
||||
// Reopen and delete all
|
||||
db, err := sql.Open("sqlite", path+"?_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.SetMaxOpenConns(1)
|
||||
defer db.Close()
|
||||
|
||||
_, err = db.Exec("DELETE FROM transmissions")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check freelist before vacuum
|
||||
var freelistBefore int64
|
||||
db.QueryRow("PRAGMA freelist_count").Scan(&freelistBefore)
|
||||
if freelistBefore == 0 {
|
||||
t.Fatal("expected non-zero freelist after DELETE")
|
||||
}
|
||||
|
||||
// Run incremental vacuum
|
||||
_, err = db.Exec("PRAGMA incremental_vacuum(10000)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check freelist after vacuum
|
||||
var freelistAfter int64
|
||||
db.QueryRow("PRAGMA freelist_count").Scan(&freelistAfter)
|
||||
if freelistAfter >= freelistBefore {
|
||||
t.Fatalf("expected freelist to shrink: before=%d after=%d", freelistBefore, freelistAfter)
|
||||
}
|
||||
|
||||
// Checkpoint WAL and check file size shrunk
|
||||
db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
|
||||
db.Close()
|
||||
infoAfter, _ := os.Stat(path)
|
||||
sizeAfter := infoAfter.Size()
|
||||
if sizeAfter >= sizeBefore {
|
||||
t.Logf("warning: file did not shrink (before=%d after=%d) — may depend on page reuse", sizeBefore, sizeAfter)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAutoVacuumLogs(t *testing.T) {
|
||||
// This test verifies checkAutoVacuum doesn't panic on various configs
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "test.db")
|
||||
|
||||
// Create a fresh DB with auto_vacuum=INCREMENTAL
|
||||
dbConn := createFreshDBWithAutoVacuum(t, path)
|
||||
db := &DB{conn: dbConn, path: path}
|
||||
cfg := &Config{}
|
||||
|
||||
// Should not panic
|
||||
checkAutoVacuum(db, cfg, path)
|
||||
dbConn.Close()
|
||||
|
||||
// Create a DB without auto_vacuum
|
||||
path2 := filepath.Join(dir, "test2.db")
|
||||
dbConn2, _ := sql.Open("sqlite", path2+"?_pragma=journal_mode(WAL)")
|
||||
dbConn2.SetMaxOpenConns(1)
|
||||
dbConn2.Exec("CREATE TABLE dummy (id INTEGER PRIMARY KEY)")
|
||||
db2 := &DB{conn: dbConn2, path: path2}
|
||||
|
||||
// Should log warning but not panic
|
||||
checkAutoVacuum(db2, cfg, path2)
|
||||
dbConn2.Close()
|
||||
}
|
||||
|
||||
func TestConfigIncrementalVacuumPages(t *testing.T) {
|
||||
// Default
|
||||
cfg := &Config{}
|
||||
if cfg.IncrementalVacuumPages() != 1024 {
|
||||
t.Fatalf("expected default 1024, got %d", cfg.IncrementalVacuumPages())
|
||||
}
|
||||
|
||||
// Custom
|
||||
cfg.DB = &DBConfig{IncrementalVacuumPages: 512}
|
||||
if cfg.IncrementalVacuumPages() != 512 {
|
||||
t.Fatalf("expected 512, got %d", cfg.IncrementalVacuumPages())
|
||||
}
|
||||
|
||||
// Zero should return default
|
||||
cfg.DB.IncrementalVacuumPages = 0
|
||||
if cfg.IncrementalVacuumPages() != 1024 {
|
||||
t.Fatalf("expected default 1024 for zero, got %d", cfg.IncrementalVacuumPages())
|
||||
}
|
||||
}
|
||||
+116
-76
@@ -9,9 +9,6 @@ import (
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
"github.com/meshcore-analyzer/sigvalidate"
|
||||
)
|
||||
|
||||
// Route type constants (header bits 1-0)
|
||||
@@ -63,10 +60,9 @@ type TransportCodes struct {
|
||||
|
||||
// Path holds decoded path/hop information.
|
||||
type Path struct {
|
||||
HashSize int `json:"hashSize"`
|
||||
HashCount int `json:"hashCount"`
|
||||
Hops []string `json:"hops"`
|
||||
HopsCompleted *int `json:"hopsCompleted,omitempty"`
|
||||
HashSize int `json:"hashSize"`
|
||||
HashCount int `json:"hashCount"`
|
||||
Hops []string `json:"hops"`
|
||||
}
|
||||
|
||||
// AdvertFlags holds decoded advert flag bits.
|
||||
@@ -95,7 +91,6 @@ type Payload struct {
|
||||
Timestamp uint32 `json:"timestamp,omitempty"`
|
||||
TimestampISO string `json:"timestampISO,omitempty"`
|
||||
Signature string `json:"signature,omitempty"`
|
||||
SignatureValid *bool `json:"signatureValid,omitempty"`
|
||||
Flags *AdvertFlags `json:"flags,omitempty"`
|
||||
Lat *float64 `json:"lat,omitempty"`
|
||||
Lon *float64 `json:"lon,omitempty"`
|
||||
@@ -117,7 +112,6 @@ type DecodedPacket struct {
|
||||
Path Path `json:"path"`
|
||||
Payload Payload `json:"payload"`
|
||||
Raw string `json:"raw"`
|
||||
Anomaly string `json:"anomaly,omitempty"`
|
||||
}
|
||||
|
||||
func decodeHeader(b byte) Header {
|
||||
@@ -165,9 +159,8 @@ func decodePath(pathByte byte, buf []byte, offset int) (Path, int) {
|
||||
}, totalBytes
|
||||
}
|
||||
|
||||
// isTransportRoute delegates to packetpath.IsTransportRoute.
|
||||
func isTransportRoute(routeType int) bool {
|
||||
return packetpath.IsTransportRoute(routeType)
|
||||
return routeType == RouteTransportFlood || routeType == RouteTransportDirect
|
||||
}
|
||||
|
||||
func decodeEncryptedPayload(typeName string, buf []byte) Payload {
|
||||
@@ -194,7 +187,7 @@ func decodeAck(buf []byte) Payload {
|
||||
}
|
||||
}
|
||||
|
||||
func decodeAdvert(buf []byte, validateSignatures bool) Payload {
|
||||
func decodeAdvert(buf []byte) Payload {
|
||||
if len(buf) < 100 {
|
||||
return Payload{Type: "ADVERT", Error: "too short for advert", RawHex: hex.EncodeToString(buf)}
|
||||
}
|
||||
@@ -212,16 +205,6 @@ func decodeAdvert(buf []byte, validateSignatures bool) Payload {
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
if validateSignatures {
|
||||
valid, err := sigvalidate.ValidateAdvert(buf[0:32], buf[36:100], timestamp, appdata)
|
||||
if err != nil {
|
||||
f := false
|
||||
p.SignatureValid = &f
|
||||
} else {
|
||||
p.SignatureValid = &valid
|
||||
}
|
||||
}
|
||||
|
||||
if len(appdata) > 0 {
|
||||
flags := appdata[0]
|
||||
advType := int(flags & 0x0F)
|
||||
@@ -324,7 +307,7 @@ func decodeTrace(buf []byte) Payload {
|
||||
return p
|
||||
}
|
||||
|
||||
func decodePayload(payloadType int, buf []byte, validateSignatures bool) Payload {
|
||||
func decodePayload(payloadType int, buf []byte) Payload {
|
||||
switch payloadType {
|
||||
case PayloadREQ:
|
||||
return decodeEncryptedPayload("REQ", buf)
|
||||
@@ -335,7 +318,7 @@ func decodePayload(payloadType int, buf []byte, validateSignatures bool) Payload
|
||||
case PayloadACK:
|
||||
return decodeAck(buf)
|
||||
case PayloadADVERT:
|
||||
return decodeAdvert(buf, validateSignatures)
|
||||
return decodeAdvert(buf)
|
||||
case PayloadGRP_TXT:
|
||||
return decodeGrpTxt(buf)
|
||||
case PayloadANON_REQ:
|
||||
@@ -350,7 +333,7 @@ func decodePayload(payloadType int, buf []byte, validateSignatures bool) Payload
|
||||
}
|
||||
|
||||
// DecodePacket decodes a hex-encoded MeshCore packet.
|
||||
func DecodePacket(hexString string, validateSignatures bool) (*DecodedPacket, error) {
|
||||
func DecodePacket(hexString string) (*DecodedPacket, error) {
|
||||
hexString = strings.ReplaceAll(hexString, " ", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\n", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\r", "")
|
||||
@@ -388,65 +371,133 @@ func DecodePacket(hexString string, validateSignatures bool) (*DecodedPacket, er
|
||||
offset += bytesConsumed
|
||||
|
||||
payloadBuf := buf[offset:]
|
||||
payload := decodePayload(header.PayloadType, payloadBuf, validateSignatures)
|
||||
payload := decodePayload(header.PayloadType, payloadBuf)
|
||||
|
||||
// TRACE packets store hop IDs in the payload (buf[9:]) rather than the header
|
||||
// path field. Firmware always sends TRACE as DIRECT (route_type 2 or 3);
|
||||
// FLOOD-routed TRACEs are anomalous but handled gracefully (parsed, but
|
||||
// flagged). The TRACE flags byte (payload offset 8) encodes path_sz in
|
||||
// bits 0-1 as a power-of-two exponent: hash_bytes = 1 << path_sz.
|
||||
// NOT the header path byte's hash_size bits. The header path contains SNR
|
||||
// bytes — one per hop that actually forwarded.
|
||||
// We expose hopsCompleted (count of SNR bytes) so consumers can distinguish
|
||||
// how far the trace got vs the full intended route.
|
||||
var anomaly string
|
||||
// path field. The header path byte still encodes hashSize in bits 6-7, which
|
||||
// we use to split the payload path data into individual hop prefixes.
|
||||
if header.PayloadType == PayloadTRACE && payload.PathData != "" {
|
||||
// Flag anomalous routing — firmware only sends TRACE as DIRECT
|
||||
if header.RouteType != RouteDirect && header.RouteType != RouteTransportDirect {
|
||||
anomaly = "TRACE packet with non-DIRECT routing (expected DIRECT or TRANSPORT_DIRECT)"
|
||||
}
|
||||
// The header path hops count represents SNR entries = completed hops
|
||||
hopsCompleted := path.HashCount
|
||||
pathBytes, err := hex.DecodeString(payload.PathData)
|
||||
if err == nil && payload.TraceFlags != nil {
|
||||
// path_sz from flags byte is a power-of-two exponent per firmware:
|
||||
// hash_bytes = 1 << (flags & 0x03)
|
||||
pathSz := 1 << (*payload.TraceFlags & 0x03)
|
||||
hops := make([]string, 0, len(pathBytes)/pathSz)
|
||||
for i := 0; i+pathSz <= len(pathBytes); i += pathSz {
|
||||
hops = append(hops, strings.ToUpper(hex.EncodeToString(pathBytes[i:i+pathSz])))
|
||||
if err == nil && path.HashSize > 0 {
|
||||
hops := make([]string, 0, len(pathBytes)/path.HashSize)
|
||||
for i := 0; i+path.HashSize <= len(pathBytes); i += path.HashSize {
|
||||
hops = append(hops, strings.ToUpper(hex.EncodeToString(pathBytes[i:i+path.HashSize])))
|
||||
}
|
||||
path.Hops = hops
|
||||
path.HashCount = len(hops)
|
||||
path.HashSize = pathSz
|
||||
path.HopsCompleted = &hopsCompleted
|
||||
}
|
||||
}
|
||||
|
||||
// Zero-hop direct packets have hash_count=0 (lower 6 bits of pathByte),
|
||||
// which makes the generic formula yield a bogus hashSize. Reset to 0
|
||||
// (unknown) so API consumers get correct data. We mask with 0x3F to check
|
||||
// only hash_count, matching the JS frontend approach — the upper hash_size
|
||||
// bits are meaningless when there are no hops. Skip TRACE packets — they
|
||||
// use hashSize to parse hops from the payload above.
|
||||
if (header.RouteType == RouteDirect || header.RouteType == RouteTransportDirect) && pathByte&0x3F == 0 && header.PayloadType != PayloadTRACE {
|
||||
path.HashSize = 0
|
||||
}
|
||||
|
||||
return &DecodedPacket{
|
||||
Header: header,
|
||||
TransportCodes: tc,
|
||||
Path: path,
|
||||
Payload: payload,
|
||||
Raw: strings.ToUpper(hexString),
|
||||
Anomaly: anomaly,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HexRange represents a labeled byte range for the hex breakdown visualization.
|
||||
type HexRange struct {
|
||||
Start int `json:"start"`
|
||||
End int `json:"end"`
|
||||
Label string `json:"label"`
|
||||
}
|
||||
|
||||
// Breakdown holds colored byte ranges returned by the packet detail endpoint.
|
||||
type Breakdown struct {
|
||||
Ranges []HexRange `json:"ranges"`
|
||||
}
|
||||
|
||||
// BuildBreakdown computes labeled byte ranges for each section of a MeshCore packet.
|
||||
// The returned ranges are consumed by createColoredHexDump() and buildHexLegend()
|
||||
// in the frontend (public/app.js).
|
||||
func BuildBreakdown(hexString string) *Breakdown {
|
||||
hexString = strings.ReplaceAll(hexString, " ", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\n", "")
|
||||
hexString = strings.ReplaceAll(hexString, "\r", "")
|
||||
buf, err := hex.DecodeString(hexString)
|
||||
if err != nil || len(buf) < 2 {
|
||||
return &Breakdown{Ranges: []HexRange{}}
|
||||
}
|
||||
|
||||
var ranges []HexRange
|
||||
offset := 0
|
||||
|
||||
// Byte 0: Header
|
||||
ranges = append(ranges, HexRange{Start: 0, End: 0, Label: "Header"})
|
||||
offset = 1
|
||||
|
||||
header := decodeHeader(buf[0])
|
||||
|
||||
// Bytes 1-4: Transport Codes (TRANSPORT_FLOOD / TRANSPORT_DIRECT only)
|
||||
if isTransportRoute(header.RouteType) {
|
||||
if len(buf) < offset+4 {
|
||||
return &Breakdown{Ranges: ranges}
|
||||
}
|
||||
ranges = append(ranges, HexRange{Start: offset, End: offset + 3, Label: "Transport Codes"})
|
||||
offset += 4
|
||||
}
|
||||
|
||||
if offset >= len(buf) {
|
||||
return &Breakdown{Ranges: ranges}
|
||||
}
|
||||
|
||||
// Next byte: Path Length (bits 7-6 = hashSize-1, bits 5-0 = hashCount)
|
||||
ranges = append(ranges, HexRange{Start: offset, End: offset, Label: "Path Length"})
|
||||
pathByte := buf[offset]
|
||||
offset++
|
||||
|
||||
hashSize := int(pathByte>>6) + 1
|
||||
hashCount := int(pathByte & 0x3F)
|
||||
pathBytes := hashSize * hashCount
|
||||
|
||||
// Path hops
|
||||
if hashCount > 0 && offset+pathBytes <= len(buf) {
|
||||
ranges = append(ranges, HexRange{Start: offset, End: offset + pathBytes - 1, Label: "Path"})
|
||||
}
|
||||
offset += pathBytes
|
||||
|
||||
if offset >= len(buf) {
|
||||
return &Breakdown{Ranges: ranges}
|
||||
}
|
||||
|
||||
payloadStart := offset
|
||||
|
||||
// Payload — break ADVERT into named sub-fields; everything else is one Payload range
|
||||
if header.PayloadType == PayloadADVERT && len(buf)-payloadStart >= 100 {
|
||||
ranges = append(ranges, HexRange{Start: payloadStart, End: payloadStart + 31, Label: "PubKey"})
|
||||
ranges = append(ranges, HexRange{Start: payloadStart + 32, End: payloadStart + 35, Label: "Timestamp"})
|
||||
ranges = append(ranges, HexRange{Start: payloadStart + 36, End: payloadStart + 99, Label: "Signature"})
|
||||
|
||||
appStart := payloadStart + 100
|
||||
if appStart < len(buf) {
|
||||
ranges = append(ranges, HexRange{Start: appStart, End: appStart, Label: "Flags"})
|
||||
appFlags := buf[appStart]
|
||||
fOff := appStart + 1
|
||||
if appFlags&0x10 != 0 && fOff+8 <= len(buf) {
|
||||
ranges = append(ranges, HexRange{Start: fOff, End: fOff + 3, Label: "Latitude"})
|
||||
ranges = append(ranges, HexRange{Start: fOff + 4, End: fOff + 7, Label: "Longitude"})
|
||||
fOff += 8
|
||||
}
|
||||
if appFlags&0x20 != 0 && fOff+2 <= len(buf) {
|
||||
fOff += 2
|
||||
}
|
||||
if appFlags&0x40 != 0 && fOff+2 <= len(buf) {
|
||||
fOff += 2
|
||||
}
|
||||
if appFlags&0x80 != 0 && fOff < len(buf) {
|
||||
ranges = append(ranges, HexRange{Start: fOff, End: len(buf) - 1, Label: "Name"})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ranges = append(ranges, HexRange{Start: payloadStart, End: len(buf) - 1, Label: "Payload"})
|
||||
}
|
||||
|
||||
return &Breakdown{Ranges: ranges}
|
||||
}
|
||||
|
||||
// ComputeContentHash computes the SHA-256-based content hash (first 16 hex chars).
|
||||
// It hashes the payload-type nibble + payload (skipping path bytes) to produce a
|
||||
// route-independent identifier for the same logical packet. For TRACE packets,
|
||||
// path_len is included in the hash to match firmware behavior.
|
||||
func ComputeContentHash(rawHex string) string {
|
||||
buf, err := hex.DecodeString(rawHex)
|
||||
if err != nil || len(buf) < 2 {
|
||||
@@ -482,18 +533,7 @@ func ComputeContentHash(rawHex string) string {
|
||||
}
|
||||
|
||||
payload := buf[payloadStart:]
|
||||
|
||||
// Hash payload-type byte only (bits 2-5 of header), not the full header.
|
||||
// Firmware: SHA256(payload_type + [path_len for TRACE] + payload)
|
||||
// Using the full header caused different hashes for the same logical packet
|
||||
// when route type or version bits differed. See issue #786.
|
||||
payloadType := (headerByte >> 2) & 0x0F
|
||||
toHash := []byte{payloadType}
|
||||
if int(payloadType) == PayloadTRACE {
|
||||
// Firmware uses uint16_t path_len (2 bytes, little-endian)
|
||||
toHash = append(toHash, pathByte, 0x00)
|
||||
}
|
||||
toHash = append(toHash, payload...)
|
||||
toHash := append([]byte{headerByte}, payload...)
|
||||
|
||||
h := sha256.Sum256(toHash)
|
||||
return hex.EncodeToString(h[:])[:16]
|
||||
|
||||
+125
-323
@@ -1,9 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -68,7 +65,7 @@ func TestDecodePacket_TransportFloodHasCodes(t *testing.T) {
|
||||
// Path byte: 0x00 (hashSize=1, hashCount=0)
|
||||
// Payload: at least some bytes for GRP_TXT
|
||||
hex := "14AABBCCDD00112233445566778899"
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
pkt, err := DecodePacket(hex)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -88,7 +85,7 @@ func TestDecodePacket_FloodHasNoCodes(t *testing.T) {
|
||||
// Path byte: 0x00 (no hops)
|
||||
// Some payload bytes
|
||||
hex := "110011223344556677889900AABBCCDD"
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
pkt, err := DecodePacket(hex)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -97,86 +94,145 @@ func TestDecodePacket_FloodHasNoCodes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func TestZeroHopDirectHashSize(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x00 → hash_count=0, hash_size bits=0 → should get HashSize=0
|
||||
// Need at least a few payload bytes after pathByte.
|
||||
hex := "02" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("DIRECT zero-hop: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
func TestBuildBreakdown_InvalidHex(t *testing.T) {
|
||||
b := BuildBreakdown("not-hex!")
|
||||
if len(b.Ranges) != 0 {
|
||||
t.Errorf("expected empty ranges for invalid hex, got %d", len(b.Ranges))
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroHopDirectHashSizeWithNonZeroUpperBits(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x40 → hash_count=0, hash_size bits=01 → should still get HashSize=0
|
||||
// because hash_count is zero (lower 6 bits are 0).
|
||||
hex := "02" + "40" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("DIRECT zero-hop with hash_size bits set: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
func TestBuildBreakdown_TooShort(t *testing.T) {
|
||||
b := BuildBreakdown("11") // 1 byte — no path byte
|
||||
if len(b.Ranges) != 0 {
|
||||
t.Errorf("expected empty ranges for too-short packet, got %d", len(b.Ranges))
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroHopTransportDirectHashSize(t *testing.T) {
|
||||
// TRANSPORT_DIRECT (RouteType=3) + REQ (PayloadType=0) → header byte = 0x03
|
||||
// 4 bytes transport codes + pathByte=0x00 → hash_count=0 → should get HashSize=0
|
||||
hex := "03" + "11223344" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("TRANSPORT_DIRECT zero-hop: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
func TestBuildBreakdown_FloodNonAdvert(t *testing.T) {
|
||||
// Header 0x15: route=1/FLOOD, payload=5/GRP_TXT
|
||||
// PathByte 0x01: 1 hop, 1-byte hash
|
||||
// PathHop: AA
|
||||
// Payload: FF0011
|
||||
b := BuildBreakdown("1501AAFFFF00")
|
||||
labels := rangeLabels(b.Ranges)
|
||||
expect := []string{"Header", "Path Length", "Path", "Payload"}
|
||||
if !equalLabels(labels, expect) {
|
||||
t.Errorf("expected labels %v, got %v", expect, labels)
|
||||
}
|
||||
// Verify byte positions
|
||||
assertRange(t, b.Ranges, "Header", 0, 0)
|
||||
assertRange(t, b.Ranges, "Path Length", 1, 1)
|
||||
assertRange(t, b.Ranges, "Path", 2, 2)
|
||||
assertRange(t, b.Ranges, "Payload", 3, 5)
|
||||
}
|
||||
|
||||
func TestZeroHopTransportDirectHashSizeWithNonZeroUpperBits(t *testing.T) {
|
||||
// TRANSPORT_DIRECT (RouteType=3) + REQ (PayloadType=0) → header byte = 0x03
|
||||
// 4 bytes transport codes + pathByte=0xC0 → hash_count=0, hash_size bits=11 → should still get HashSize=0
|
||||
hex := "03" + "11223344" + "C0" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 0 {
|
||||
t.Errorf("TRANSPORT_DIRECT zero-hop with hash_size bits set: want HashSize=0, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
func TestBuildBreakdown_TransportFlood(t *testing.T) {
|
||||
// Header 0x14: route=0/TRANSPORT_FLOOD, payload=5/GRP_TXT
|
||||
// TransportCodes: AABBCCDD (4 bytes)
|
||||
// PathByte 0x01: 1 hop, 1-byte hash
|
||||
// PathHop: EE
|
||||
// Payload: FF00
|
||||
b := BuildBreakdown("14AABBCCDD01EEFF00")
|
||||
assertRange(t, b.Ranges, "Header", 0, 0)
|
||||
assertRange(t, b.Ranges, "Transport Codes", 1, 4)
|
||||
assertRange(t, b.Ranges, "Path Length", 5, 5)
|
||||
assertRange(t, b.Ranges, "Path", 6, 6)
|
||||
assertRange(t, b.Ranges, "Payload", 7, 8)
|
||||
}
|
||||
|
||||
func TestNonDirectZeroPathByteKeepsHashSize(t *testing.T) {
|
||||
// FLOOD (RouteType=1) + REQ (PayloadType=0) → header byte = 0x01
|
||||
// pathByte=0x00 → even though hash_count=0, non-DIRECT should keep HashSize=1
|
||||
hex := "01" + "00" + repeatHex("AA", 20)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
}
|
||||
if pkt.Path.HashSize != 1 {
|
||||
t.Errorf("FLOOD zero pathByte: want HashSize=1 (unchanged), got %d", pkt.Path.HashSize)
|
||||
func TestBuildBreakdown_FloodNoHops(t *testing.T) {
|
||||
// Header 0x15: FLOOD/GRP_TXT; PathByte 0x00: 0 hops; Payload: AABB
|
||||
b := BuildBreakdown("150000AABB")
|
||||
assertRange(t, b.Ranges, "Header", 0, 0)
|
||||
assertRange(t, b.Ranges, "Path Length", 1, 1)
|
||||
// No Path range since hashCount=0
|
||||
for _, r := range b.Ranges {
|
||||
if r.Label == "Path" {
|
||||
t.Error("expected no Path range for zero-hop packet")
|
||||
}
|
||||
}
|
||||
assertRange(t, b.Ranges, "Payload", 2, 4)
|
||||
}
|
||||
|
||||
func TestDirectNonZeroHopKeepsHashSize(t *testing.T) {
|
||||
// DIRECT (RouteType=2) + REQ (PayloadType=0) → header byte = 0x02
|
||||
// pathByte=0x01 → hash_count=1, hash_size=1 → should keep HashSize=1
|
||||
// Need 1 hop hash byte after pathByte.
|
||||
hex := "02" + "01" + repeatHex("BB", 21)
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket failed: %v", err)
|
||||
func TestBuildBreakdown_AdvertBasic(t *testing.T) {
|
||||
// Header 0x11: FLOOD/ADVERT
|
||||
// PathByte 0x01: 1 hop, 1-byte hash
|
||||
// PathHop: AA
|
||||
// Payload: 100 bytes (PubKey32 + Timestamp4 + Signature64) + Flags=0x02 (repeater, no extras)
|
||||
pubkey := repeatHex("AB", 32)
|
||||
ts := "00000000" // 4 bytes
|
||||
sig := repeatHex("CD", 64)
|
||||
flags := "02"
|
||||
hex := "1101AA" + pubkey + ts + sig + flags
|
||||
b := BuildBreakdown(hex)
|
||||
assertRange(t, b.Ranges, "Header", 0, 0)
|
||||
assertRange(t, b.Ranges, "Path Length", 1, 1)
|
||||
assertRange(t, b.Ranges, "Path", 2, 2)
|
||||
assertRange(t, b.Ranges, "PubKey", 3, 34)
|
||||
assertRange(t, b.Ranges, "Timestamp", 35, 38)
|
||||
assertRange(t, b.Ranges, "Signature", 39, 102)
|
||||
assertRange(t, b.Ranges, "Flags", 103, 103)
|
||||
}
|
||||
|
||||
func TestBuildBreakdown_AdvertWithLocation(t *testing.T) {
|
||||
// flags=0x12: hasLocation bit set
|
||||
pubkey := repeatHex("00", 32)
|
||||
ts := "00000000"
|
||||
sig := repeatHex("00", 64)
|
||||
flags := "12" // 0x10 = hasLocation
|
||||
latBytes := "00000000"
|
||||
lonBytes := "00000000"
|
||||
hex := "1101AA" + pubkey + ts + sig + flags + latBytes + lonBytes
|
||||
b := BuildBreakdown(hex)
|
||||
assertRange(t, b.Ranges, "Latitude", 104, 107)
|
||||
assertRange(t, b.Ranges, "Longitude", 108, 111)
|
||||
}
|
||||
|
||||
func TestBuildBreakdown_AdvertWithName(t *testing.T) {
|
||||
// flags=0x82: hasName bit set
|
||||
pubkey := repeatHex("00", 32)
|
||||
ts := "00000000"
|
||||
sig := repeatHex("00", 64)
|
||||
flags := "82" // 0x80 = hasName
|
||||
name := "4E6F6465" // "Node" in hex
|
||||
hex := "1101AA" + pubkey + ts + sig + flags + name
|
||||
b := BuildBreakdown(hex)
|
||||
assertRange(t, b.Ranges, "Name", 104, 107)
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
func rangeLabels(ranges []HexRange) []string {
|
||||
out := make([]string, len(ranges))
|
||||
for i, r := range ranges {
|
||||
out[i] = r.Label
|
||||
}
|
||||
if pkt.Path.HashSize != 1 {
|
||||
t.Errorf("DIRECT with 1 hop: want HashSize=1, got %d", pkt.Path.HashSize)
|
||||
return out
|
||||
}
|
||||
|
||||
// equalLabels reports whether a and b contain the same strings in the same order.
func equalLabels(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
|
||||
|
||||
func assertRange(t *testing.T, ranges []HexRange, label string, wantStart, wantEnd int) {
|
||||
t.Helper()
|
||||
for _, r := range ranges {
|
||||
if r.Label == label {
|
||||
if r.Start != wantStart || r.End != wantEnd {
|
||||
t.Errorf("range %q: want [%d,%d], got [%d,%d]", label, wantStart, wantEnd, r.Start, r.End)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Errorf("range %q not found in %v", label, rangeLabels(ranges))
|
||||
}
|
||||
|
||||
func repeatHex(byteHex string, n int) string {
|
||||
@@ -186,257 +242,3 @@ func repeatHex(byteHex string, n int) string {
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceHopsCompleted(t *testing.T) {
|
||||
// Build a TRACE packet:
|
||||
// header: route=FLOOD(1), payload=TRACE(9), version=0 → (0<<6)|(9<<2)|1 = 0x25
|
||||
// path_length: hash_size bits=0b00 (1-byte), hash_count=2 (2 SNR bytes) → 0x02
|
||||
// path: 2 SNR bytes: 0xAA, 0xBB
|
||||
// payload: tag(4 LE) + authCode(4 LE) + flags(1) + 4 hop hashes (1 byte each)
|
||||
hex := "2502AABB" + // header + path_length + 2 SNR bytes
|
||||
"01000000" + // tag = 1
|
||||
"02000000" + // authCode = 2
|
||||
"00" + // flags = 0
|
||||
"DEADBEEF" // 4 hops (1-byte hash each)
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if pkt.Payload.Type != "TRACE" {
|
||||
t.Fatalf("expected TRACE, got %s", pkt.Payload.Type)
|
||||
}
|
||||
// Full intended route = 4 hops from payload
|
||||
if len(pkt.Path.Hops) != 4 {
|
||||
t.Errorf("expected 4 hops, got %d: %v", len(pkt.Path.Hops), pkt.Path.Hops)
|
||||
}
|
||||
// HopsCompleted = 2 (from header path SNR count)
|
||||
if pkt.Path.HopsCompleted == nil {
|
||||
t.Fatal("expected HopsCompleted to be set")
|
||||
}
|
||||
if *pkt.Path.HopsCompleted != 2 {
|
||||
t.Errorf("expected HopsCompleted=2, got %d", *pkt.Path.HopsCompleted)
|
||||
}
|
||||
// FLOOD routing for TRACE is anomalous
|
||||
if pkt.Anomaly == "" {
|
||||
t.Error("expected anomaly flag for FLOOD-routed TRACE")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceNoSNR(t *testing.T) {
|
||||
// TRACE with 0 SNR bytes (trace hasn't been forwarded yet)
|
||||
// path_length: hash_size=0b00 (1-byte), hash_count=0 → 0x00
|
||||
hex := "2500" + // header + path_length (0 hops in header)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"00" + // flags
|
||||
"AABBCC" // 3 hops intended
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if pkt.Path.HopsCompleted == nil {
|
||||
t.Fatal("expected HopsCompleted to be set")
|
||||
}
|
||||
if *pkt.Path.HopsCompleted != 0 {
|
||||
t.Errorf("expected HopsCompleted=0, got %d", *pkt.Path.HopsCompleted)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 3 {
|
||||
t.Errorf("expected 3 hops, got %d", len(pkt.Path.Hops))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceFullyCompleted(t *testing.T) {
|
||||
// TRACE where all hops completed (SNR count = hop count)
|
||||
// path_length: hash_size=0b00 (1-byte), hash_count=3 → 0x03
|
||||
hex := "2503AABBCC" + // header + path_length + 3 SNR bytes
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"00" + // flags
|
||||
"DDEEFF" // 3 hops intended
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if pkt.Path.HopsCompleted == nil {
|
||||
t.Fatal("expected HopsCompleted to be set")
|
||||
}
|
||||
if *pkt.Path.HopsCompleted != 3 {
|
||||
t.Errorf("expected HopsCompleted=3, got %d", *pkt.Path.HopsCompleted)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 3 {
|
||||
t.Errorf("expected 3 hops, got %d", len(pkt.Path.Hops))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceFlags1_TwoBytePathSz(t *testing.T) {
|
||||
// TRACE with flags=1 → path_sz = 1 << (1 & 0x03) = 2-byte hashes
|
||||
// Firmware always sends TRACE as DIRECT (route_type=2), so header byte =
|
||||
// (0<<6)|(9<<2)|2 = 0x26. path_length 0x00 = 0 SNR bytes.
|
||||
hex := "2600" + // header (DIRECT+TRACE) + path_length (0 SNR)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"01" + // flags = 1 → path_sz = 2
|
||||
"AABBCCDD" // 4 bytes = 2 hops of 2-byte each
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 2 {
|
||||
t.Errorf("expected 2 hops (2-byte path_sz), got %d: %v", len(pkt.Path.Hops), pkt.Path.Hops)
|
||||
}
|
||||
if pkt.Path.HashSize != 2 {
|
||||
t.Errorf("expected HashSize=2, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
if pkt.Anomaly != "" {
|
||||
t.Errorf("expected no anomaly for DIRECT TRACE, got %q", pkt.Anomaly)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceFlags2_FourBytePathSz(t *testing.T) {
|
||||
// TRACE with flags=2 → path_sz = 1 << (2 & 0x03) = 4-byte hashes
|
||||
// DIRECT route_type (0x26)
|
||||
hex := "2600" + // header (DIRECT+TRACE) + path_length (0 SNR)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"02" + // flags = 2 → path_sz = 4
|
||||
"AABBCCDD11223344" // 8 bytes = 2 hops of 4-byte each
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 2 {
|
||||
t.Errorf("expected 2 hops (4-byte path_sz), got %d: %v", len(pkt.Path.Hops), pkt.Path.Hops)
|
||||
}
|
||||
if pkt.Path.HashSize != 4 {
|
||||
t.Errorf("expected HashSize=4, got %d", pkt.Path.HashSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TracePathSzUnevenPayload(t *testing.T) {
|
||||
// TRACE with flags=1 → path_sz=2, but 5 bytes of path data (not evenly divisible)
|
||||
// Should produce 2 hops (4 bytes) and ignore the trailing byte
|
||||
hex := "2600" + // header (DIRECT+TRACE) + path_length (0 SNR)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"01" + // flags = 1 → path_sz = 2
|
||||
"AABBCCDDEE" // 5 bytes → 2 hops, 1 byte remainder ignored
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 2 {
|
||||
t.Errorf("expected 2 hops (trailing byte ignored), got %d: %v", len(pkt.Path.Hops), pkt.Path.Hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceTransportDirect(t *testing.T) {
|
||||
// TRACE via TRANSPORT_DIRECT (route_type=3) — includes 4 transport code bytes
|
||||
// header: (0<<6)|(9<<2)|3 = 0x27
|
||||
hex := "27" + // header (TRANSPORT_DIRECT+TRACE)
|
||||
"AABB" + "CCDD" + // transport codes (2+2 bytes)
|
||||
"02" + // path_length: hash_count=2 SNR bytes
|
||||
"EEFF" + // 2 SNR bytes
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"00" + // flags = 0 → path_sz = 1
|
||||
"112233" // 3 hops (1-byte each)
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if pkt.TransportCodes == nil {
|
||||
t.Fatal("expected transport codes for TRANSPORT_DIRECT")
|
||||
}
|
||||
if pkt.TransportCodes.Code1 != "AABB" {
|
||||
t.Errorf("expected Code1=AABB, got %s", pkt.TransportCodes.Code1)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 3 {
|
||||
t.Errorf("expected 3 hops, got %d: %v", len(pkt.Path.Hops), pkt.Path.Hops)
|
||||
}
|
||||
if pkt.Path.HopsCompleted == nil || *pkt.Path.HopsCompleted != 2 {
|
||||
t.Errorf("expected HopsCompleted=2, got %v", pkt.Path.HopsCompleted)
|
||||
}
|
||||
if pkt.Anomaly != "" {
|
||||
t.Errorf("expected no anomaly for TRANSPORT_DIRECT TRACE, got %q", pkt.Anomaly)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceFloodRouteAnomaly(t *testing.T) {
|
||||
// TRACE via FLOOD (route_type=1) — anomalous per firmware (firmware only
|
||||
// sends TRACE as DIRECT). Should still parse but flag the anomaly.
|
||||
hex := "2500" + // header (FLOOD+TRACE) + path_length (0 SNR)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"01" + // flags = 1 → path_sz = 2
|
||||
"AABBCCDD" // 4 bytes = 2 hops of 2-byte each
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("should not crash on anomalous FLOOD+TRACE: %v", err)
|
||||
}
|
||||
if len(pkt.Path.Hops) != 2 {
|
||||
t.Errorf("expected 2 hops even for anomalous FLOOD route, got %d", len(pkt.Path.Hops))
|
||||
}
|
||||
if pkt.Anomaly == "" {
|
||||
t.Error("expected anomaly flag for FLOOD-routed TRACE, got empty string")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeAdvertSignatureValidation(t *testing.T) {
|
||||
pub, priv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var timestamp uint32 = 1234567890
|
||||
appdata := []byte{0x02} // flags: repeater, no extras
|
||||
|
||||
// Build signed message: pubKey(32) + timestamp(4 LE) + appdata
|
||||
msg := make([]byte, 32+4+len(appdata))
|
||||
copy(msg[0:32], pub)
|
||||
binary.LittleEndian.PutUint32(msg[32:36], timestamp)
|
||||
copy(msg[36:], appdata)
|
||||
sig := ed25519.Sign(priv, msg)
|
||||
|
||||
// Build a raw advert buffer: pubKey(32) + timestamp(4) + signature(64) + appdata
|
||||
buf := make([]byte, 100+len(appdata))
|
||||
copy(buf[0:32], pub)
|
||||
binary.LittleEndian.PutUint32(buf[32:36], timestamp)
|
||||
copy(buf[36:100], sig)
|
||||
copy(buf[100:], appdata)
|
||||
|
||||
// With validation enabled
|
||||
p := decodeAdvert(buf, true)
|
||||
if p.SignatureValid == nil {
|
||||
t.Fatal("expected SignatureValid to be set")
|
||||
}
|
||||
if !*p.SignatureValid {
|
||||
t.Error("expected valid signature")
|
||||
}
|
||||
if p.PubKey != hex.EncodeToString(pub) {
|
||||
t.Errorf("pubkey mismatch: got %s", p.PubKey)
|
||||
}
|
||||
|
||||
// Tamper with signature → invalid
|
||||
buf[40] ^= 0xFF
|
||||
p = decodeAdvert(buf, true)
|
||||
if p.SignatureValid == nil {
|
||||
t.Fatal("expected SignatureValid to be set")
|
||||
}
|
||||
if *p.SignatureValid {
|
||||
t.Error("expected invalid signature after tampering")
|
||||
}
|
||||
|
||||
// Without validation → SignatureValid should be nil
|
||||
p = decodeAdvert(buf, false)
|
||||
if p.SignatureValid != nil {
|
||||
t.Error("expected SignatureValid to be nil when validation disabled")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// seedEncryptedChannelData adds undecryptable GRP_TXT packets to the test DB.
|
||||
func seedEncryptedChannelData(t *testing.T, db *DB) {
|
||||
t.Helper()
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
// Two encrypted GRP_TXT packets on channel hash "A1B2"
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('EE01', 'enc_hash_001', ?, 1, 5, '{"type":"GRP_TXT","channelHashHex":"A1B2","decryptionStatus":"no_key"}', 'enc_A1B2')`, recent)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
|
||||
VALUES ('EE02', 'enc_hash_002', ?, 1, 5, '{"type":"GRP_TXT","channelHashHex":"A1B2","decryptionStatus":"no_key"}', 'enc_A1B2')`, recent)
|
||||
|
||||
// Observations for both
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES ((SELECT id FROM transmissions WHERE hash='enc_hash_001'), 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES ((SELECT id FROM transmissions WHERE hash='enc_hash_002'), 1, 10.0, -90, '[]', ?)`, recentEpoch)
|
||||
}
|
||||
|
||||
func TestGetEncryptedChannels(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
seedTestData(t, db)
|
||||
seedEncryptedChannelData(t, db)
|
||||
|
||||
channels, err := db.GetEncryptedChannels()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(channels) != 1 {
|
||||
t.Fatalf("expected 1 encrypted channel, got %d", len(channels))
|
||||
}
|
||||
ch := channels[0]
|
||||
if ch["hash"] != "enc_A1B2" {
|
||||
t.Errorf("expected hash enc_A1B2, got %v", ch["hash"])
|
||||
}
|
||||
if ch["encrypted"] != true {
|
||||
t.Errorf("expected encrypted=true, got %v", ch["encrypted"])
|
||||
}
|
||||
if ch["messageCount"] != 2 {
|
||||
t.Errorf("expected messageCount=2, got %v", ch["messageCount"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelsAPIExcludesEncrypted(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
// Seed encrypted data into the server's DB
|
||||
// setupTestServer uses seedTestData which has no encrypted packets,
|
||||
// so default /api/channels should NOT include encrypted channels.
|
||||
req := httptest.NewRequest("GET", "/api/channels", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &body)
|
||||
channels := body["channels"].([]interface{})
|
||||
|
||||
for _, ch := range channels {
|
||||
m := ch.(map[string]interface{})
|
||||
if enc, ok := m["encrypted"]; ok && enc == true {
|
||||
t.Errorf("default /api/channels should not include encrypted channels, found: %v", m["hash"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelsAPIIncludesEncryptedWithParam(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
// Add encrypted data to the server's DB
|
||||
seedEncryptedChannelData(t, srv.db)
|
||||
// Reload store so in-memory also has the data
|
||||
store := NewPacketStore(srv.db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatalf("store.Load: %v", err)
|
||||
}
|
||||
srv.store = store
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/channels?includeEncrypted=true", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &body)
|
||||
channels := body["channels"].([]interface{})
|
||||
|
||||
foundEncrypted := false
|
||||
for _, ch := range channels {
|
||||
m := ch.(map[string]interface{})
|
||||
if enc, ok := m["encrypted"]; ok && enc == true {
|
||||
foundEncrypted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundEncrypted {
|
||||
t.Error("expected encrypted channels with includeEncrypted=true, found none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelMessagesExcludesEncrypted(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
seedEncryptedChannelData(t, srv.db)
|
||||
store := NewPacketStore(srv.db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatalf("store.Load: %v", err)
|
||||
}
|
||||
srv.store = store
|
||||
|
||||
// Request messages for the encrypted channel — should return empty
|
||||
req := httptest.NewRequest("GET", "/api/channels/enc_A1B2/messages", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &body)
|
||||
messages, ok := body["messages"].([]interface{})
|
||||
if !ok {
|
||||
// messages might be null/missing — that's fine, means no messages
|
||||
return
|
||||
}
|
||||
// Encrypted messages should not be returned as readable messages
|
||||
for _, msg := range messages {
|
||||
m := msg.(map[string]interface{})
|
||||
if text, ok := m["text"].(string); ok && text != "" {
|
||||
t.Errorf("encrypted channel should not return readable messages, got text: %s", text)
|
||||
}
|
||||
}
|
||||
}
|
||||
+20
-320
@@ -85,12 +85,6 @@ func makeTestStore(count int, startTime time.Time, intervalMin int) *PacketStore
|
||||
|
||||
// Subpath index
|
||||
addTxToSubpathIndex(store.spIndex, tx)
|
||||
|
||||
// Track bytes for self-accounting
|
||||
store.trackedBytes += estimateStoreTxBytes(tx)
|
||||
for _, obs := range tx.Observations {
|
||||
store.trackedBytes += estimateStoreObsBytes(obs)
|
||||
}
|
||||
}
|
||||
|
||||
return store
|
||||
@@ -172,43 +166,43 @@ func TestEvictStale_MemoryBasedEviction(t *testing.T) {
|
||||
// All packets are recent (1h old) so time-based won't trigger.
|
||||
store.retentionHours = 24
|
||||
store.maxMemoryMB = 3
|
||||
// Set trackedBytes to simulate 6MB (over 3MB limit).
|
||||
store.trackedBytes = 6 * 1048576
|
||||
// Inject deterministic estimator: simulates 6MB (over 3MB limit).
|
||||
// Uses packet count so it scales correctly after eviction.
|
||||
store.memoryEstimator = func() float64 {
|
||||
return float64(len(store.packets)*5120+store.totalObs*500) / 1048576.0
|
||||
}
|
||||
|
||||
evicted := store.EvictStale()
|
||||
if evicted == 0 {
|
||||
t.Fatal("expected some evictions for memory cap")
|
||||
}
|
||||
// 25% safety cap should limit to 250 per pass
|
||||
if evicted > 250 {
|
||||
t.Fatalf("25%% safety cap violated: evicted %d", evicted)
|
||||
}
|
||||
// trackedBytes should have decreased
|
||||
if store.trackedBytes >= 6*1048576 {
|
||||
t.Fatal("trackedBytes should have decreased after eviction")
|
||||
estMB := store.estimatedMemoryMB()
|
||||
if estMB > 3.5 {
|
||||
t.Fatalf("expected <=3.5MB after eviction, got %.1fMB", estMB)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEvictStale_MemoryBasedEviction_UnderestimatedHeap verifies that the 25%
|
||||
// safety cap prevents cascading eviction even when trackedBytes is very high.
|
||||
// TestEvictStale_MemoryBasedEviction_UnderestimatedHeap verifies that eviction
|
||||
// fires correctly when actual heap is much larger than a formula-based estimate
|
||||
// would report — the scenario that caused OOM kills in production.
|
||||
func TestEvictStale_MemoryBasedEviction_UnderestimatedHeap(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
|
||||
store.retentionHours = 24
|
||||
store.maxMemoryMB = 500
|
||||
// Simulate trackedBytes 5x over budget.
|
||||
store.trackedBytes = 2500 * 1048576
|
||||
// Simulate actual heap 5x over budget (like production: ~5GB actual vs ~1GB limit).
|
||||
store.memoryEstimator = func() float64 {
|
||||
return 2500.0 // 2500MB actual vs 500MB limit
|
||||
}
|
||||
|
||||
evicted := store.EvictStale()
|
||||
if evicted == 0 {
|
||||
t.Fatal("expected evictions when tracked is 5x over limit")
|
||||
t.Fatal("expected evictions when heap is 5x over limit")
|
||||
}
|
||||
// Safety cap: max 25% per pass = 250
|
||||
if evicted > 250 {
|
||||
t.Fatalf("25%% safety cap violated: evicted %d of 1000", evicted)
|
||||
}
|
||||
if evicted != 250 {
|
||||
t.Fatalf("expected exactly 250 evicted (25%% cap), got %d", evicted)
|
||||
// Should keep roughly 500/2500 * 0.9 = 18% of packets → ~180 of 1000.
|
||||
remaining := len(store.packets)
|
||||
if remaining > 250 {
|
||||
t.Fatalf("expected most packets evicted (heap 5x over), but %d of 1000 remain", remaining)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -245,101 +239,6 @@ func TestEvictStale_CleansNodeIndexes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvictStale_CleansResolvedPathNodeIndexes(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create a temp DB for on-demand SQL fetch during eviction
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
store := &PacketStore{
|
||||
packets: make([]*StoreTx, 0),
|
||||
byHash: make(map[string]*StoreTx),
|
||||
byTxID: make(map[int]*StoreTx),
|
||||
byObsID: make(map[int]*StoreObs),
|
||||
byObserver: make(map[string][]*StoreObs),
|
||||
byNode: make(map[string][]*StoreTx),
|
||||
nodeHashes: make(map[string]map[string]bool),
|
||||
byPayloadType: make(map[int][]*StoreTx),
|
||||
spIndex: make(map[string]int),
|
||||
distHops: make([]distHopRecord, 0),
|
||||
distPaths: make([]distPathRecord, 0),
|
||||
rfCache: make(map[string]*cachedResult),
|
||||
topoCache: make(map[string]*cachedResult),
|
||||
hashCache: make(map[string]*cachedResult),
|
||||
chanCache: make(map[string]*cachedResult),
|
||||
distCache: make(map[string]*cachedResult),
|
||||
subpathCache: make(map[string]*cachedResult),
|
||||
rfCacheTTL: 15 * time.Second,
|
||||
retentionHours: 24,
|
||||
db: db,
|
||||
useResolvedPathIndex: true,
|
||||
}
|
||||
store.initResolvedPathIndex()
|
||||
|
||||
// Create a packet indexed via resolved_path pubkeys
|
||||
relayPK := "relay0001abcdef"
|
||||
txID := 1
|
||||
obsID := 100
|
||||
tx := &StoreTx{
|
||||
ID: txID,
|
||||
Hash: "hash_rp_001",
|
||||
FirstSeen: now.Add(-48 * time.Hour).UTC().Format(time.RFC3339),
|
||||
}
|
||||
obs := &StoreObs{
|
||||
ID: obsID,
|
||||
TransmissionID: txID,
|
||||
ObserverID: "obs0",
|
||||
Timestamp: tx.FirstSeen,
|
||||
}
|
||||
tx.Observations = append(tx.Observations, obs)
|
||||
|
||||
// Insert into DB so on-demand SQL fetch works during eviction
|
||||
db.conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen) VALUES (?, '', ?, ?)",
|
||||
txID, tx.Hash, tx.FirstSeen)
|
||||
db.conn.Exec("INSERT INTO observations (id, transmission_id, observer_idx, path_json, timestamp, resolved_path) VALUES (?, ?, 1, ?, ?, ?)",
|
||||
obsID, txID, `["aa"]`, now.Add(-48*time.Hour).Unix(), `["`+relayPK+`"]`)
|
||||
|
||||
store.packets = append(store.packets, tx)
|
||||
store.byHash[tx.Hash] = tx
|
||||
store.byTxID[tx.ID] = tx
|
||||
store.byObsID[obs.ID] = obs
|
||||
store.byObserver["obs0"] = append(store.byObserver["obs0"], obs)
|
||||
|
||||
// Index relay via decode-window simulation
|
||||
store.addToByNode(tx, relayPK)
|
||||
store.addToResolvedPubkeyIndex(txID, []string{relayPK})
|
||||
|
||||
// Verify indexed
|
||||
if len(store.byNode[relayPK]) != 1 {
|
||||
t.Fatalf("expected 1 entry in byNode[%s], got %d", relayPK, len(store.byNode[relayPK]))
|
||||
}
|
||||
if !store.nodeHashes[relayPK][tx.Hash] {
|
||||
t.Fatalf("expected nodeHashes[%s] to contain %s", relayPK, tx.Hash)
|
||||
}
|
||||
|
||||
evicted := store.RunEviction()
|
||||
if evicted != 1 {
|
||||
t.Fatalf("expected 1 evicted, got %d", evicted)
|
||||
}
|
||||
|
||||
// Verify resolved_path entries are cleaned up
|
||||
if len(store.byNode[relayPK]) != 0 {
|
||||
t.Fatalf("expected byNode[%s] to be empty after eviction, got %d", relayPK, len(store.byNode[relayPK]))
|
||||
}
|
||||
if _, exists := store.nodeHashes[relayPK]; exists {
|
||||
t.Fatalf("expected nodeHashes[%s] to be deleted after eviction", relayPK)
|
||||
}
|
||||
// Verify resolved pubkey index is cleaned up
|
||||
h := resolvedPubkeyHash(relayPK)
|
||||
if len(store.resolvedPubkeyIndex[h]) != 0 {
|
||||
t.Fatalf("expected resolvedPubkeyIndex to be empty after eviction")
|
||||
}
|
||||
if _, exists := store.resolvedPubkeyReverse[txID]; exists {
|
||||
t.Fatalf("expected resolvedPubkeyReverse to be empty after eviction")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvictStale_RunEvictionThreadSafe(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(20, now.Add(-48*time.Hour), 0)
|
||||
@@ -403,202 +302,3 @@ func TestCacheTTLDefaults(t *testing.T) {
|
||||
t.Fatalf("expected default rfCacheTTL=15s, got %v", store.rfCacheTTL)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Self-accounting memory tracking tests ---
|
||||
|
||||
func TestTrackedBytes_IncreasesOnInsert(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(0, now, 0)
|
||||
if store.trackedBytes != 0 {
|
||||
t.Fatalf("expected 0 trackedBytes for empty store, got %d", store.trackedBytes)
|
||||
}
|
||||
|
||||
store2 := makeTestStore(10, now, 1)
|
||||
if store2.trackedBytes <= 0 {
|
||||
t.Fatal("expected positive trackedBytes after inserting 10 packets")
|
||||
}
|
||||
// Each packet has 2 observations; should be roughly 10*(384+5*48) + 20*(192+2*48) = 10*624 + 20*288 = 12000
|
||||
expectedMin := int64(10*600 + 20*250) // rough lower bound
|
||||
if store2.trackedBytes < expectedMin {
|
||||
t.Fatalf("trackedBytes %d seems too low (expected > %d)", store2.trackedBytes, expectedMin)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrackedBytes_DecreasesOnEvict(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(100, now.Add(-48*time.Hour), 0)
|
||||
store.retentionHours = 24
|
||||
|
||||
beforeBytes := store.trackedBytes
|
||||
if beforeBytes <= 0 {
|
||||
t.Fatal("expected positive trackedBytes before eviction")
|
||||
}
|
||||
|
||||
evicted := store.EvictStale()
|
||||
if evicted != 100 {
|
||||
t.Fatalf("expected 100 evicted, got %d", evicted)
|
||||
}
|
||||
if store.trackedBytes != 0 {
|
||||
t.Fatalf("expected 0 trackedBytes after evicting all, got %d", store.trackedBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrackedBytes_MatchesExpectedAfterMixedInsertEvict(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
// Create 100 packets, 50 old + 50 recent
|
||||
store := makeTestStore(100, now.Add(-48*time.Hour), 0)
|
||||
for i := 50; i < 100; i++ {
|
||||
store.packets[i].FirstSeen = now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
}
|
||||
store.retentionHours = 24
|
||||
|
||||
totalBefore := store.trackedBytes
|
||||
|
||||
// Calculate expected bytes for first 50 packets (to be evicted)
|
||||
var evictedBytes int64
|
||||
for i := 0; i < 50; i++ {
|
||||
tx := store.packets[i]
|
||||
evictedBytes += estimateStoreTxBytes(tx)
|
||||
for _, obs := range tx.Observations {
|
||||
evictedBytes += estimateStoreObsBytes(obs)
|
||||
}
|
||||
}
|
||||
|
||||
store.EvictStale()
|
||||
|
||||
expectedAfter := totalBefore - evictedBytes
|
||||
if store.trackedBytes != expectedAfter {
|
||||
t.Fatalf("trackedBytes %d != expected %d (before=%d, evicted=%d)",
|
||||
store.trackedBytes, expectedAfter, totalBefore, evictedBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatermarkHysteresis(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
|
||||
store.retentionHours = 0 // no time-based eviction
|
||||
store.maxMemoryMB = 1 // 1MB budget
|
||||
|
||||
// Set trackedBytes to just above high watermark
|
||||
highWatermark := int64(1 * 1048576)
|
||||
lowWatermark := int64(float64(highWatermark) * 0.85)
|
||||
store.trackedBytes = highWatermark + 1
|
||||
|
||||
evicted := store.EvictStale()
|
||||
if evicted == 0 {
|
||||
t.Fatal("expected eviction when above high watermark")
|
||||
}
|
||||
if store.trackedBytes > lowWatermark+1024 {
|
||||
t.Fatalf("expected trackedBytes near low watermark after eviction, got %d (low=%d)",
|
||||
store.trackedBytes, lowWatermark)
|
||||
}
|
||||
|
||||
// Now set trackedBytes to just below high watermark — should NOT trigger
|
||||
store.trackedBytes = highWatermark - 1
|
||||
evicted2 := store.EvictStale()
|
||||
if evicted2 != 0 {
|
||||
t.Fatalf("expected no eviction below high watermark, got %d", evicted2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSafetyCap25Percent(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
|
||||
store.retentionHours = 0
|
||||
store.maxMemoryMB = 1
|
||||
|
||||
// Set trackedBytes way over limit to force maximum eviction
|
||||
store.trackedBytes = 100 * 1048576 // 100MB vs 1MB limit
|
||||
|
||||
evicted := store.EvictStale()
|
||||
// 25% of 1000 = 250
|
||||
if evicted > 250 {
|
||||
t.Fatalf("25%% safety cap violated: evicted %d of 1000 (max should be 250)", evicted)
|
||||
}
|
||||
if evicted != 250 {
|
||||
t.Fatalf("expected exactly 250 evicted (25%% cap), got %d", evicted)
|
||||
}
|
||||
if len(store.packets) != 750 {
|
||||
t.Fatalf("expected 750 remaining, got %d", len(store.packets))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiplePassesConverge(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
|
||||
store.retentionHours = 0
|
||||
// Set budget to half the actual tracked bytes — requires ~2 passes
|
||||
actualBytes := store.trackedBytes
|
||||
store.maxMemoryMB = int(float64(actualBytes) / 1048576.0 / 2)
|
||||
if store.maxMemoryMB < 1 {
|
||||
store.maxMemoryMB = 1
|
||||
}
|
||||
|
||||
totalEvicted := 0
|
||||
for pass := 0; pass < 20; pass++ {
|
||||
evicted := store.EvictStale()
|
||||
if evicted == 0 {
|
||||
break
|
||||
}
|
||||
totalEvicted += evicted
|
||||
}
|
||||
|
||||
// After convergence, trackedBytes should be at or below high watermark
|
||||
// (may be between low and high due to hysteresis — that's fine)
|
||||
highWatermark := int64(store.maxMemoryMB) * 1048576
|
||||
if store.trackedBytes > highWatermark {
|
||||
t.Fatalf("did not converge: trackedBytes=%d (%.1fMB) > highWatermark=%d after multiple passes",
|
||||
store.trackedBytes, float64(store.trackedBytes)/1048576.0, highWatermark)
|
||||
}
|
||||
if totalEvicted == 0 {
|
||||
t.Fatal("expected some evictions across multiple passes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEstimateStoreTxBytes(t *testing.T) {
|
||||
tx := &StoreTx{
|
||||
RawHex: "aabbcc",
|
||||
Hash: "hash1234",
|
||||
DecodedJSON: `{"pubKey":"pk1"}`,
|
||||
PathJSON: `["aa","bb"]`,
|
||||
}
|
||||
est := estimateStoreTxBytes(tx)
|
||||
// Manual calculation: base + string lengths + index entries + perTxMaps + path hops + subpaths
|
||||
hops := int64(len(txGetParsedPath(tx)))
|
||||
manualCalc := int64(storeTxBaseBytes) + int64(len(tx.RawHex)+len(tx.Hash)+len(tx.DecodedJSON)+len(tx.PathJSON)) + int64(numIndexesPerTx*indexEntryBytes)
|
||||
manualCalc += perTxMapsBytes
|
||||
manualCalc += hops * perPathHopBytes
|
||||
if hops > 1 {
|
||||
manualCalc += (hops * (hops - 1) / 2) * perSubpathEntryBytes
|
||||
}
|
||||
if est != manualCalc {
|
||||
t.Fatalf("estimateStoreTxBytes = %d, want %d (manual calc)", est, manualCalc)
|
||||
}
|
||||
if est < 600 || est > 1200 {
|
||||
t.Fatalf("estimateStoreTxBytes = %d, expected in range [600, 1200]", est)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEstimateStoreObsBytes(t *testing.T) {
|
||||
obs := &StoreObs{
|
||||
ObserverID: "obs123",
|
||||
PathJSON: `["aa"]`,
|
||||
}
|
||||
est := estimateStoreObsBytes(obs)
|
||||
// storeObsBaseBytes(192) + len(ObserverID=6) + len(PathJSON=6) + 2*48(96) = 300
|
||||
expected := int64(192 + 6 + 6 + 2*48)
|
||||
if est != expected {
|
||||
t.Fatalf("estimateStoreObsBytes = %d, want %d", est, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEviction100K(b *testing.B) {
|
||||
now := time.Now().UTC()
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
store := makeTestStore(100000, now.Add(-48*time.Hour), 0)
|
||||
store.retentionHours = 24
|
||||
b.StartTimer()
|
||||
store.EvictStale()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,18 +6,11 @@ require (
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/meshcore-analyzer/geofilter v0.0.0
|
||||
github.com/meshcore-analyzer/sigvalidate v0.0.0
|
||||
modernc.org/sqlite v1.34.5
|
||||
)
|
||||
|
||||
replace github.com/meshcore-analyzer/geofilter => ../../internal/geofilter
|
||||
|
||||
replace github.com/meshcore-analyzer/sigvalidate => ../../internal/sigvalidate
|
||||
|
||||
require github.com/meshcore-analyzer/packetpath v0.0.0
|
||||
|
||||
replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath
|
||||
|
||||
require (
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// migrateContentHashesAsync recomputes content hashes in batches after the
|
||||
// server is already serving HTTP. Packets whose hash changes are updated in
|
||||
// both the DB and the in-memory byHash index. The migration is idempotent:
|
||||
// once all hashes match the current formula it completes instantly.
|
||||
func migrateContentHashesAsync(store *PacketStore, batchSize int, yieldDuration time.Duration) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("[hash-migrate] panic recovered: %v", r)
|
||||
}
|
||||
store.hashMigrationComplete.Store(true)
|
||||
}()
|
||||
|
||||
// Snapshot the packet slice length under lock (packets only grow).
|
||||
store.mu.RLock()
|
||||
total := len(store.packets)
|
||||
store.mu.RUnlock()
|
||||
|
||||
migrated := 0
|
||||
for offset := 0; offset < total; offset += batchSize {
|
||||
end := offset + batchSize
|
||||
if end > total {
|
||||
end = total
|
||||
}
|
||||
|
||||
// Collect stale hashes in this batch under RLock.
|
||||
type hashUpdate struct {
|
||||
tx *StoreTx
|
||||
oldHash string
|
||||
newHash string
|
||||
}
|
||||
var updates []hashUpdate
|
||||
|
||||
store.mu.RLock()
|
||||
for _, tx := range store.packets[offset:end] {
|
||||
if tx.RawHex == "" {
|
||||
continue
|
||||
}
|
||||
newHash := ComputeContentHash(tx.RawHex)
|
||||
if newHash != tx.Hash {
|
||||
updates = append(updates, hashUpdate{tx: tx, oldHash: tx.Hash, newHash: newHash})
|
||||
}
|
||||
}
|
||||
store.mu.RUnlock()
|
||||
|
||||
if len(updates) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Write batch to DB in a single transaction.
|
||||
dbTx, err := store.db.conn.Begin()
|
||||
if err != nil {
|
||||
log.Printf("[hash-migrate] begin tx: %v", err)
|
||||
continue
|
||||
}
|
||||
stmt, err := dbTx.Prepare("UPDATE transmissions SET hash = ? WHERE id = ?")
|
||||
if err != nil {
|
||||
log.Printf("[hash-migrate] prepare: %v", err)
|
||||
dbTx.Rollback()
|
||||
continue
|
||||
}
|
||||
|
||||
for _, u := range updates {
|
||||
if _, err := stmt.Exec(u.newHash, u.tx.ID); err != nil {
|
||||
// UNIQUE constraint = two old hashes map to the same new hash (duplicate).
|
||||
// Merge observations to the surviving tx, delete the duplicate.
|
||||
log.Printf("[hash-migrate] tx %d collides — merging duplicate", u.tx.ID)
|
||||
var survID int
|
||||
if err2 := dbTx.QueryRow("SELECT id FROM transmissions WHERE hash = ?", u.newHash).Scan(&survID); err2 == nil {
|
||||
dbTx.Exec("UPDATE observations SET transmission_id = ? WHERE transmission_id = ?", survID, u.tx.ID)
|
||||
dbTx.Exec("DELETE FROM transmissions WHERE id = ?", u.tx.ID)
|
||||
u.newHash = "" // mark for in-memory removal only
|
||||
}
|
||||
}
|
||||
}
|
||||
stmt.Close()
|
||||
|
||||
if err := dbTx.Commit(); err != nil {
|
||||
log.Printf("[hash-migrate] commit: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update in-memory index under write lock.
|
||||
store.mu.Lock()
|
||||
for _, u := range updates {
|
||||
delete(store.byHash, u.oldHash)
|
||||
if u.newHash == "" {
|
||||
// Merged duplicate — remove from packets slice and indexes.
|
||||
delete(store.byTxID, u.tx.ID)
|
||||
// Move observations to survivor if present.
|
||||
if surv := store.byHash[ComputeContentHash(u.tx.RawHex)]; surv != nil {
|
||||
for _, obs := range u.tx.Observations {
|
||||
surv.Observations = append(surv.Observations, obs)
|
||||
surv.ObservationCount++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
u.tx.Hash = u.newHash
|
||||
store.byHash[u.newHash] = u.tx
|
||||
}
|
||||
}
|
||||
store.mu.Unlock()
|
||||
|
||||
migrated += len(updates)
|
||||
|
||||
// Yield to let HTTP handlers run.
|
||||
time.Sleep(yieldDuration)
|
||||
}
|
||||
|
||||
if migrated > 0 {
|
||||
log.Printf("[hash-migrate] Migrated %d content hashes to new formula", migrated)
|
||||
}
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMigrateContentHashesAsync(t *testing.T) {
|
||||
db := setupTestDBv2(t)
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// Insert a packet with a manually wrong hash (simulating old formula).
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
correctHash := ComputeContentHash(rawHex)
|
||||
wrongHash := "deadbeef12345678"
|
||||
|
||||
_, err := db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type)
|
||||
VALUES (?, ?, datetime('now'), 0, 2)`, rawHex, wrongHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if store.byHash[wrongHash] == nil {
|
||||
t.Fatal("expected packet under wrong hash before migration")
|
||||
}
|
||||
|
||||
migrateContentHashesAsync(store, 100, time.Millisecond)
|
||||
|
||||
if !store.hashMigrationComplete.Load() {
|
||||
t.Error("expected hashMigrationComplete to be true")
|
||||
}
|
||||
if store.byHash[wrongHash] != nil {
|
||||
t.Error("old hash should be removed from index")
|
||||
}
|
||||
if store.byHash[correctHash] == nil {
|
||||
t.Error("new hash should be in index")
|
||||
}
|
||||
|
||||
var dbHash string
|
||||
err = db.conn.QueryRow("SELECT hash FROM transmissions WHERE raw_hex = ?", rawHex).Scan(&dbHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dbHash != correctHash {
|
||||
t.Errorf("DB hash = %s, want %s", dbHash, correctHash)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMigrateContentHashesAsync_NoOp(t *testing.T) {
|
||||
db := setupTestDBv2(t)
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
rawHex := "0A00D69FD7A5A7475DB07337749AE61FA53A4788E976"
|
||||
correctHash := ComputeContentHash(rawHex)
|
||||
|
||||
_, err := db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type)
|
||||
VALUES (?, ?, datetime('now'), 0, 2)`, rawHex, correctHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
migrateContentHashesAsync(store, 100, time.Millisecond)
|
||||
|
||||
if !store.hashMigrationComplete.Load() {
|
||||
t.Error("expected hashMigrationComplete to be true")
|
||||
}
|
||||
if store.byHash[correctHash] == nil {
|
||||
t.Error("hash should remain in index")
|
||||
}
|
||||
}
|
||||
@@ -1,107 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
const issue673NodePK = "7502f19f44cad6d7b626e1d811c00a914af452636182ccded3fd019803395ec9"
|
||||
|
||||
// setupIssue673Store builds an in-memory store with one repeater node having:
|
||||
// - one ADVERT packet (legitimately indexed in byNode)
|
||||
// - one GRP_TXT packet whose decoded text contains the node's pubkey (false-positive candidate)
|
||||
func setupIssue673Store(t *testing.T) (*PacketStore, *DB) {
|
||||
t.Helper()
|
||||
db := setupTestDB(t)
|
||||
|
||||
_, err := db.conn.Exec(
|
||||
"INSERT INTO nodes (public_key, name, role) VALUES (?, ?, ?)",
|
||||
issue673NodePK, "Quail Hollow Park", "repeater",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ps := NewPacketStore(db, nil)
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
|
||||
pt4 := 4 // ADVERT
|
||||
pt5 := 5 // GRP_TXT
|
||||
|
||||
advertDecoded, _ := json.Marshal(map[string]interface{}{"pubKey": issue673NodePK})
|
||||
advert := &StoreTx{
|
||||
ID: 1,
|
||||
Hash: "advert_hash_673",
|
||||
PayloadType: &pt4,
|
||||
DecodedJSON: string(advertDecoded),
|
||||
FirstSeen: now,
|
||||
}
|
||||
|
||||
otherPK := "aabbccddaabbccddaabbccddaabbccddaabbccddaabbccddaabbccddaabbccdd"
|
||||
chatDecoded, _ := json.Marshal(map[string]interface{}{
|
||||
"srcPubKey": otherPK,
|
||||
"text": "Check out node " + issue673NodePK + " on the analyzer",
|
||||
})
|
||||
chat := &StoreTx{
|
||||
ID: 2,
|
||||
Hash: "chat_hash_673",
|
||||
PayloadType: &pt5,
|
||||
DecodedJSON: string(chatDecoded),
|
||||
FirstSeen: now,
|
||||
}
|
||||
|
||||
ps.mu.Lock()
|
||||
ps.packets = append(ps.packets, advert, chat)
|
||||
ps.byHash[advert.Hash] = advert
|
||||
ps.byHash[chat.Hash] = chat
|
||||
ps.byTxID[advert.ID] = advert
|
||||
ps.byTxID[chat.ID] = chat
|
||||
ps.byNode[issue673NodePK] = []*StoreTx{advert}
|
||||
ps.mu.Unlock()
|
||||
|
||||
return ps, db
|
||||
}
|
||||
|
||||
// TestGetNodeAnalytics_ExcludesGRPTXTWithPubkeyInText verifies that a GRP_TXT packet
|
||||
// whose message text contains a node's pubkey is not counted in that node's analytics.
|
||||
func TestGetNodeAnalytics_ExcludesGRPTXTWithPubkeyInText(t *testing.T) {
|
||||
ps, db := setupIssue673Store(t)
|
||||
defer db.Close()
|
||||
|
||||
analytics, err := ps.GetNodeAnalytics(issue673NodePK, 30)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if analytics == nil {
|
||||
t.Fatal("expected analytics, got nil")
|
||||
}
|
||||
|
||||
for _, ptc := range analytics.PacketTypeBreakdown {
|
||||
if ptc.PayloadType == 5 {
|
||||
t.Errorf("GRP_TXT (type 5) should not appear in analytics for repeater node, got count=%d", ptc.Count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterPackets_NodeQueryDoesNotMatchChatText verifies that the slow path of
|
||||
// filterPackets (node filter combined with Since) does not return a GRP_TXT packet
|
||||
// whose pubkey appears only in message text, not in a structured pubkey field.
|
||||
func TestFilterPackets_NodeQueryDoesNotMatchChatText(t *testing.T) {
|
||||
ps, db := setupIssue673Store(t)
|
||||
defer db.Close()
|
||||
|
||||
yesterday := time.Now().Add(-24 * time.Hour).UTC().Format(time.RFC3339)
|
||||
result := ps.QueryPackets(PacketQuery{Node: issue673NodePK, Since: yesterday, Limit: 50})
|
||||
|
||||
if result.Total != 1 {
|
||||
t.Errorf("expected 1 packet for node (ADVERT only), got %d", result.Total)
|
||||
}
|
||||
for _, pkt := range result.Packets {
|
||||
if pkt["hash"] == "chat_hash_673" {
|
||||
t.Errorf("GRP_TXT with pubkey in message text was incorrectly returned for node query")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// TestRepro810 reproduces #810: when the longest-path observation has NULL
|
||||
// resolved_path but a shorter-path observation has one, fetchResolvedPathForTxBest
|
||||
// returns nil → /api/nodes/{pk}/health.recentPackets[].resolved_path is missing
|
||||
// while /api/packets shows it.
|
||||
func TestRepro810(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen, packet_count) VALUES ('obs1','O1',?, '2026-01-01T00:00:00Z', 100)`, recent)
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen, packet_count) VALUES ('obs2','O2',?, '2026-01-01T00:00:00Z', 100)`, recent)
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen, first_seen, advert_count) VALUES ('aabbccdd11223344','R','repeater',?, '2026-01-01T00:00:00Z', 1)`, recent)
|
||||
db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json) VALUES ('AABB','testhash00000001',?,1,4,'{"pubKey":"aabbccdd11223344","type":"ADVERT"}')`, recent)
|
||||
// Longest-path obs WITHOUT resolved_path
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp) VALUES (1,1,12.5,-90,'["aa","bb","cc"]',?)`, recentEpoch)
|
||||
// Shorter-path obs WITH resolved_path
|
||||
db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path) VALUES (1,2,8.0,-95,'["aa","bb"]',?,'["aabbccdd11223344","eeff00112233aabb"]')`, recentEpoch-100)
|
||||
|
||||
cfg := &Config{Port: 3000}
|
||||
hub := NewHub()
|
||||
srv := NewServer(db, cfg, hub)
|
||||
store := NewPacketStore(db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srv.store = store
|
||||
router := mux.NewRouter()
|
||||
srv.RegisterRoutes(router)
|
||||
|
||||
// Sanity: /api/packets should show resolved_path for this tx.
|
||||
reqP := httptest.NewRequest("GET", "/api/packets?limit=10", nil)
|
||||
wP := httptest.NewRecorder()
|
||||
router.ServeHTTP(wP, reqP)
|
||||
var pktsBody map[string]interface{}
|
||||
json.Unmarshal(wP.Body.Bytes(), &pktsBody)
|
||||
pkts, _ := pktsBody["packets"].([]interface{})
|
||||
hasOnPackets := false
|
||||
for _, p := range pkts {
|
||||
pm := p.(map[string]interface{})
|
||||
if pm["hash"] == "testhash00000001" && pm["resolved_path"] != nil {
|
||||
hasOnPackets = true
|
||||
}
|
||||
}
|
||||
if !hasOnPackets {
|
||||
t.Fatal("precondition: /api/packets must report resolved_path for tx")
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/health", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
var body map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &body)
|
||||
rp, _ := body["recentPackets"].([]interface{})
|
||||
if len(rp) == 0 {
|
||||
t.Fatal("no recentPackets")
|
||||
}
|
||||
for _, p := range rp {
|
||||
pm := p.(map[string]interface{})
|
||||
if pm["hash"] == "testhash00000001" {
|
||||
if pm["resolved_path"] == nil {
|
||||
t.Fatal("BUG #810: /health.recentPackets resolved_path is nil despite /api/packets reporting it")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Fatal("tx not found in recentPackets")
|
||||
}
|
||||
@@ -111,14 +111,6 @@ func main() {
|
||||
// Resolve DB path
|
||||
resolvedDB := cfg.ResolveDBPath(configDir)
|
||||
log.Printf("[config] port=%d db=%s public=%s", cfg.Port, resolvedDB, publicDir)
|
||||
if len(cfg.NodeBlacklist) > 0 {
|
||||
log.Printf("[config] nodeBlacklist: %d node(s) will be hidden from API", len(cfg.NodeBlacklist))
|
||||
for _, pk := range cfg.NodeBlacklist {
|
||||
if trimmed := strings.ToLower(strings.TrimSpace(pk)); trimmed != "" {
|
||||
log.Printf("[config] blacklisted: %s", trimmed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Open database
|
||||
database, err := OpenDB(resolvedDB)
|
||||
@@ -148,9 +140,6 @@ func main() {
|
||||
stats.TotalTransmissions, stats.TotalObservations, stats.TotalNodes, stats.TotalObservers)
|
||||
}
|
||||
|
||||
// Check auto_vacuum mode and optionally migrate (#919)
|
||||
checkAutoVacuum(database, cfg, resolvedDB)
|
||||
|
||||
// In-memory packet store
|
||||
store := NewPacketStore(database, cfg.PacketStore, cfg.CacheTTL)
|
||||
if err := store.Load(); err != nil {
|
||||
@@ -269,7 +258,6 @@ func main() {
|
||||
defer stopEviction()
|
||||
|
||||
// Auto-prune old packets if retention.packetDays is configured
|
||||
vacuumPages := cfg.IncrementalVacuumPages()
|
||||
var stopPrune func()
|
||||
if cfg.Retention != nil && cfg.Retention.PacketDays > 0 {
|
||||
days := cfg.Retention.PacketDays
|
||||
@@ -290,9 +278,6 @@ func main() {
|
||||
log.Printf("[prune] error: %v", err)
|
||||
} else {
|
||||
log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
|
||||
if n > 0 {
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
@@ -301,9 +286,6 @@ func main() {
|
||||
log.Printf("[prune] error: %v", err)
|
||||
} else {
|
||||
log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
|
||||
if n > 0 {
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
}
|
||||
}
|
||||
case <-pruneDone:
|
||||
return
|
||||
@@ -331,12 +313,10 @@ func main() {
|
||||
}()
|
||||
time.Sleep(2 * time.Minute) // stagger after packet prune
|
||||
database.PruneOldMetrics(metricsDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
for {
|
||||
select {
|
||||
case <-metricsPruneTicker.C:
|
||||
database.PruneOldMetrics(metricsDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
case <-metricsPruneDone:
|
||||
return
|
||||
}
|
||||
@@ -345,42 +325,6 @@ func main() {
|
||||
log.Printf("[metrics-prune] auto-prune enabled: metrics older than %d days", metricsDays)
|
||||
}
|
||||
|
||||
// Auto-prune stale observers
|
||||
var stopObserverPrune func()
|
||||
{
|
||||
observerDays := cfg.ObserverDaysOrDefault()
|
||||
if observerDays <= -1 {
|
||||
// -1 means keep forever, skip
|
||||
} else {
|
||||
observerPruneTicker := time.NewTicker(24 * time.Hour)
|
||||
observerPruneDone := make(chan struct{})
|
||||
stopObserverPrune = func() {
|
||||
observerPruneTicker.Stop()
|
||||
close(observerPruneDone)
|
||||
}
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("[observer-prune] panic recovered: %v", r)
|
||||
}
|
||||
}()
|
||||
time.Sleep(3 * time.Minute) // stagger after metrics prune
|
||||
database.RemoveStaleObservers(observerDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
for {
|
||||
select {
|
||||
case <-observerPruneTicker.C:
|
||||
database.RemoveStaleObservers(observerDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
case <-observerPruneDone:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
log.Printf("[observer-prune] auto-prune enabled: observers not seen in %d days will be removed", observerDays)
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-prune old neighbor edges
|
||||
var stopEdgePrune func()
|
||||
{
|
||||
@@ -402,7 +346,6 @@ func main() {
|
||||
g := store.graph
|
||||
store.mu.RUnlock()
|
||||
PruneNeighborEdges(dbPath, g, maxAgeDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
for {
|
||||
select {
|
||||
case <-edgePruneTicker.C:
|
||||
@@ -410,7 +353,6 @@ func main() {
|
||||
g := store.graph
|
||||
store.mu.RUnlock()
|
||||
PruneNeighborEdges(dbPath, g, maxAgeDays)
|
||||
runIncrementalVacuum(resolvedDB, vacuumPages)
|
||||
case <-edgePruneDone:
|
||||
return
|
||||
}
|
||||
@@ -444,9 +386,6 @@ func main() {
|
||||
if stopMetricsPrune != nil {
|
||||
stopMetricsPrune()
|
||||
}
|
||||
if stopObserverPrune != nil {
|
||||
stopObserverPrune()
|
||||
}
|
||||
if stopEdgePrune != nil {
|
||||
stopEdgePrune()
|
||||
}
|
||||
@@ -473,9 +412,6 @@ func main() {
|
||||
// Start async backfill in background — HTTP is now available.
|
||||
go backfillResolvedPathsAsync(store, dbPath, 5000, 100*time.Millisecond, cfg.BackfillHours())
|
||||
|
||||
// Migrate old content hashes in background (one-time, idempotent).
|
||||
go migrateContentHashesAsync(store, 5000, 100*time.Millisecond)
|
||||
|
||||
if err := httpServer.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.Fatalf("[server] %v", err)
|
||||
}
|
||||
|
||||
@@ -1,132 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MemorySnapshot is a point-in-time view of process memory across several
|
||||
// vantage points. Values are in MB (1024*1024 bytes), rounded to one decimal.
|
||||
//
|
||||
// Field invariants (typical, not guaranteed under exotic conditions):
|
||||
//
|
||||
// processRSSMB >= goSysMB >= goHeapInuseMB >= storeDataMB
|
||||
//
|
||||
// - processRSSMB is what the kernel charges the process (resident set).
|
||||
// Read from /proc/self/status `VmRSS:` on Linux; falls back to goSysMB
|
||||
// on other platforms or when /proc is unavailable.
|
||||
// - goSysMB is the total memory obtained from the OS by the Go runtime
|
||||
// (heap, stacks, GC metadata, mspans, mcache, etc.). Includes
|
||||
// fragmentation and unused-but-mapped span overhead.
|
||||
// - goHeapInuseMB is the live, in-use Go heap (HeapInuse). Excludes
|
||||
// idle spans and runtime overhead.
|
||||
// - storeDataMB is the in-store packet byte estimate (transmissions +
|
||||
// observations). Subset of HeapInuse. Does not include index maps,
|
||||
// analytics caches, broadcast queues, or runtime overhead. Used as
|
||||
// the input to the eviction watermark.
|
||||
//
|
||||
// processRSSMB and storeDataMB are monotonic only relative to ingest +
|
||||
// eviction; both can shrink when packets age out. goHeapInuseMB and goSysMB
|
||||
// fluctuate with GC.
|
||||
//
|
||||
// cgoBytesMB intentionally absent: this build uses the pure-Go
|
||||
// modernc.org/sqlite driver, so there is no cgo allocator to measure.
|
||||
// Reintroduce only if we ever switch back to mattn/go-sqlite3.
|
||||
type MemorySnapshot struct {
|
||||
ProcessRSSMB float64 `json:"processRSSMB"`
|
||||
GoHeapInuseMB float64 `json:"goHeapInuseMB"`
|
||||
GoSysMB float64 `json:"goSysMB"`
|
||||
StoreDataMB float64 `json:"storeDataMB"`
|
||||
}
|
||||
|
||||
// rssCache rate-limits the /proc/self/status read. Go memory stats are
|
||||
// already cached by Server.getMemStats (5s TTL). We use a tighter 1s TTL
|
||||
// here so processRSSMB stays reasonably fresh during ops debugging
|
||||
// without paying the syscall cost on every /api/stats hit.
|
||||
var (
|
||||
rssCacheMu sync.Mutex
|
||||
rssCacheValueMB float64
|
||||
rssCacheCachedAt time.Time
|
||||
)
|
||||
|
||||
const rssCacheTTL = 1 * time.Second
|
||||
|
||||
// getMemorySnapshot composes a MemorySnapshot using the Server's existing
|
||||
// runtime.MemStats cache (5s TTL, used by /api/health and /api/perf too)
|
||||
// plus a rate-limited /proc RSS read. storeDataMB is supplied by the
|
||||
// caller because the packet store is the source of truth.
|
||||
func (s *Server) getMemorySnapshot(storeDataMB float64) MemorySnapshot {
|
||||
ms := s.getMemStats()
|
||||
|
||||
rssCacheMu.Lock()
|
||||
if time.Since(rssCacheCachedAt) > rssCacheTTL {
|
||||
rssCacheValueMB = readProcRSSMB()
|
||||
rssCacheCachedAt = time.Now()
|
||||
}
|
||||
rssMB := rssCacheValueMB
|
||||
rssCacheMu.Unlock()
|
||||
|
||||
if rssMB <= 0 {
|
||||
// Fallback when /proc is unavailable (non-Linux, sandboxes, etc.).
|
||||
// runtime.Sys is an upper bound on Go-attributable memory and a
|
||||
// reasonable proxy for pure-Go builds.
|
||||
rssMB = float64(ms.Sys) / 1048576.0
|
||||
}
|
||||
|
||||
return MemorySnapshot{
|
||||
ProcessRSSMB: roundMB(rssMB),
|
||||
GoHeapInuseMB: roundMB(float64(ms.HeapInuse) / 1048576.0),
|
||||
GoSysMB: roundMB(float64(ms.Sys) / 1048576.0),
|
||||
StoreDataMB: roundMB(storeDataMB),
|
||||
}
|
||||
}
|
||||
|
||||
// readProcRSSMB parses /proc/self/status for the VmRSS line. Returns 0 on
|
||||
// any failure (file missing, malformed line, parse error) — the caller
|
||||
// then uses a runtime fallback. Linux only; macOS/Windows return 0.
|
||||
//
|
||||
// Safety notes (djb): the file path is hard-coded, no untrusted input is
|
||||
// concatenated. We bound the read at 8 KiB (the whole status file is
|
||||
// well under 4 KiB on modern kernels) so a corrupt /proc can't OOM us.
|
||||
// We only parse digits with strconv; no shell, no exec, no format strings.
|
||||
func readProcRSSMB() float64 {
|
||||
const maxStatusBytes = 8 * 1024
|
||||
f, err := os.Open("/proc/self/status")
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
buf := make([]byte, maxStatusBytes)
|
||||
n, err := f.Read(buf)
|
||||
if err != nil && n == 0 {
|
||||
return 0
|
||||
}
|
||||
for _, line := range strings.Split(string(buf[:n]), "\n") {
|
||||
if !strings.HasPrefix(line, "VmRSS:") {
|
||||
continue
|
||||
}
|
||||
// Format: "VmRSS:\t 123456 kB"
|
||||
fields := strings.Fields(line[len("VmRSS:"):])
|
||||
if len(fields) < 2 {
|
||||
return 0
|
||||
}
|
||||
kb, err := strconv.ParseFloat(fields[0], 64)
|
||||
if err != nil || kb < 0 {
|
||||
return 0
|
||||
}
|
||||
// Unit is kB per kernel convention; convert to MB.
|
||||
return kb / 1024.0
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func roundMB(v float64) float64 {
|
||||
if v < 0 {
|
||||
return 0
|
||||
}
|
||||
return float64(int64(v*10+0.5)) / 10.0
|
||||
}
|
||||
@@ -1,435 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// recentTS returns a timestamp string N hours ago, ensuring test data
|
||||
// stays within the 7-day advert window used by computeNodeHashSizeInfo.
|
||||
func recentTS(hoursAgo int) string {
|
||||
return time.Now().UTC().Add(-time.Duration(hoursAgo) * time.Hour).Format("2006-01-02T15:04:05.000Z")
|
||||
}
|
||||
|
||||
// setupCapabilityTestDB creates a minimal in-memory DB with nodes table.
|
||||
func setupCapabilityTestDB(t *testing.T) *DB {
|
||||
t.Helper()
|
||||
conn, err := sql.Open("sqlite", ":memory:")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
conn.SetMaxOpenConns(1)
|
||||
conn.Exec(`CREATE TABLE nodes (
|
||||
public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
|
||||
lat REAL, lon REAL, last_seen TEXT, first_seen TEXT,
|
||||
advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL
|
||||
)`)
|
||||
conn.Exec(`CREATE TABLE observers (
|
||||
id TEXT PRIMARY KEY, name TEXT, iata TEXT, last_seen TEXT,
|
||||
first_seen TEXT, packet_count INTEGER DEFAULT 0, model TEXT,
|
||||
firmware TEXT, client_version TEXT, radio TEXT, battery_mv INTEGER,
|
||||
uptime_secs INTEGER
|
||||
)`)
|
||||
return &DB{conn: conn}
|
||||
}
|
||||
|
||||
// addTestPacket adds a StoreTx to the store's internal structures including
|
||||
// the byPathHop index and byPayloadType index.
|
||||
func addTestPacket(store *PacketStore, tx *StoreTx) {
|
||||
store.mu.Lock()
|
||||
defer store.mu.Unlock()
|
||||
tx.ID = len(store.packets) + 1
|
||||
if tx.Hash == "" {
|
||||
tx.Hash = fmt.Sprintf("test-hash-%d", tx.ID)
|
||||
}
|
||||
store.packets = append(store.packets, tx)
|
||||
store.byHash[tx.Hash] = tx
|
||||
store.byTxID[tx.ID] = tx
|
||||
if tx.PayloadType != nil {
|
||||
store.byPayloadType[*tx.PayloadType] = append(store.byPayloadType[*tx.PayloadType], tx)
|
||||
}
|
||||
addTxToPathHopIndex(store.byPathHop, tx)
|
||||
}
|
||||
|
||||
// buildPathByte returns a 2-char hex string for the path byte with given
|
||||
// hashSize (1-3) and hopCount.
|
||||
func buildPathByte(hashSize, hopCount int) string {
|
||||
b := byte(((hashSize - 1) & 0x3) << 6) | byte(hopCount&0x3F)
|
||||
return fmt.Sprintf("%02x", b)
|
||||
}
|
||||
|
||||
// makeTestAdvert creates a StoreTx representing a flood advert packet.
|
||||
func makeTestAdvert(pubkey string, hashSize int) *StoreTx {
|
||||
decoded, _ := json.Marshal(map[string]interface{}{"pubKey": pubkey, "name": pubkey[:8]})
|
||||
pt := 4
|
||||
pathByte := buildPathByte(hashSize, 1)
|
||||
prefix := strings.ToLower(pubkey[:hashSize*2])
|
||||
rawHex := "01" + pathByte + prefix // flood header + path byte + hop prefix
|
||||
return &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: string(decoded),
|
||||
PathJSON: `["` + prefix + `"]`,
|
||||
FirstSeen: recentTS(24),
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_Confirmed tests that a repeater advertising
|
||||
// with hash_size >= 2 is classified as "confirmed".
|
||||
func TestMultiByteCapability_Confirmed(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepA", "repeater", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
addTestPacket(store, makeTestAdvert("aabbccdd11223344", 2))
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "confirmed" {
|
||||
t.Errorf("expected confirmed, got %s", caps[0].Status)
|
||||
}
|
||||
if caps[0].Evidence != "advert" {
|
||||
t.Errorf("expected advert evidence, got %s", caps[0].Evidence)
|
||||
}
|
||||
if caps[0].MaxHashSize != 2 {
|
||||
t.Errorf("expected maxHashSize 2, got %d", caps[0].MaxHashSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_Suspected tests that a repeater whose prefix
|
||||
// appears in a multi-byte path is classified as "suspected".
|
||||
func TestMultiByteCapability_Suspected(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepB", "repeater", recentTS(48))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// Non-advert packet with 2-byte hash in path, hop prefix matching node
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aabb"
|
||||
pt := 1
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aabb"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "suspected" {
|
||||
t.Errorf("expected suspected, got %s", caps[0].Status)
|
||||
}
|
||||
if caps[0].Evidence != "path" {
|
||||
t.Errorf("expected path evidence, got %s", caps[0].Evidence)
|
||||
}
|
||||
if caps[0].MaxHashSize != 2 {
|
||||
t.Errorf("expected maxHashSize 2, got %d", caps[0].MaxHashSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_Unknown tests that a repeater with only 1-byte
|
||||
// adverts and no multi-byte path appearances is classified as "unknown".
|
||||
func TestMultiByteCapability_Unknown(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepC", "repeater", recentTS(72))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// Advert with 1-byte hash only
|
||||
addTestPacket(store, makeTestAdvert("aabbccdd11223344", 1))
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "unknown" {
|
||||
t.Errorf("expected unknown, got %s", caps[0].Status)
|
||||
}
|
||||
if caps[0].MaxHashSize != 1 {
|
||||
t.Errorf("expected maxHashSize 1, got %d", caps[0].MaxHashSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_PrefixCollision tests that when two repeaters
|
||||
// share the same prefix, one confirmed via advert, the other gets
|
||||
// suspected (not confirmed) from path data alone.
|
||||
func TestMultiByteCapability_PrefixCollision(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
// Two repeaters sharing 1-byte prefix "aa"
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabb000000000001", "RepConfirmed", "repeater", recentTS(24))
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aacc000000000002", "RepOther", "repeater", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// RepConfirmed has a 2-byte advert
|
||||
addTestPacket(store, makeTestAdvert("aabb000000000001", 2))
|
||||
|
||||
// A packet with 2-byte path containing 1-byte hop "aa" — both share this prefix
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aa"
|
||||
pt := 1
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aa"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 2 {
|
||||
t.Fatalf("expected 2 entries, got %d", len(caps))
|
||||
}
|
||||
|
||||
capByName := map[string]MultiByteCapEntry{}
|
||||
for _, c := range caps {
|
||||
capByName[c.Name] = c
|
||||
}
|
||||
|
||||
if capByName["RepConfirmed"].Status != "confirmed" {
|
||||
t.Errorf("RepConfirmed expected confirmed, got %s", capByName["RepConfirmed"].Status)
|
||||
}
|
||||
if capByName["RepOther"].Status != "suspected" {
|
||||
t.Errorf("RepOther expected suspected, got %s", capByName["RepOther"].Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_TraceExcluded tests that TRACE packets (payload_type 8)
|
||||
// do NOT contribute to "suspected" multi-byte capability. TRACE packets carry
|
||||
// hash size in their own flags, so pre-1.14 repeaters can forward multi-byte
|
||||
// TRACEs without actually supporting multi-byte hashes. See #714.
|
||||
func TestMultiByteCapability_TraceExcluded(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepTrace", "repeater", recentTS(48))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// TRACE packet (payload_type 8) with 2-byte hash in path
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aabb"
|
||||
pt := 8
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aabb"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "unknown" {
|
||||
t.Errorf("expected unknown (TRACE excluded), got %s", caps[0].Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_NonTraceStillSuspected verifies that non-TRACE packets
|
||||
// with 2-byte paths still correctly mark a repeater as "suspected".
|
||||
func TestMultiByteCapability_NonTraceStillSuspected(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepNonTrace", "repeater", recentTS(48))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// GRP_TXT packet (payload_type 1) with 2-byte hash in path
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aabb"
|
||||
pt := 1
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aabb"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "suspected" {
|
||||
t.Errorf("expected suspected, got %s", caps[0].Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_ConfirmedUnaffectedByTraceExclusion verifies that
|
||||
// "confirmed" status from adverts is not affected by the TRACE exclusion.
|
||||
func TestMultiByteCapability_ConfirmedUnaffectedByTraceExclusion(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepConfirmedTrace", "repeater", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// Advert with 2-byte hash (confirms capability)
|
||||
addTestPacket(store, makeTestAdvert("aabbccdd11223344", 2))
|
||||
|
||||
// TRACE packet also present — should not downgrade confirmed status
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aabb"
|
||||
pt := 8
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aabb"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "confirmed" {
|
||||
t.Errorf("expected confirmed (unaffected by TRACE), got %s", caps[0].Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_CompanionConfirmed tests that a companion with
|
||||
// multi-byte advert is classified as "confirmed", not "unknown" (Bug 1, #754).
|
||||
func TestMultiByteCapability_CompanionConfirmed(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "CompA", "companion", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
addTestPacket(store, makeTestAdvert("aabbccdd11223344", 2))
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(caps))
|
||||
}
|
||||
if caps[0].Status != "confirmed" {
|
||||
t.Errorf("expected confirmed for companion, got %s", caps[0].Status)
|
||||
}
|
||||
if caps[0].Role != "companion" {
|
||||
t.Errorf("expected role companion, got %s", caps[0].Role)
|
||||
}
|
||||
if caps[0].Evidence != "advert" {
|
||||
t.Errorf("expected advert evidence, got %s", caps[0].Evidence)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_RoleColumnPopulated tests that the Role field is
|
||||
// populated for all node types (Bug 2, #754).
|
||||
func TestMultiByteCapability_RoleColumnPopulated(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabb000000000001", "Rep1", "repeater", recentTS(24))
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"ccdd000000000002", "Comp1", "companion", recentTS(24))
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"eeff000000000003", "Room1", "room_server", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
addTestPacket(store, makeTestAdvert("aabb000000000001", 2))
|
||||
addTestPacket(store, makeTestAdvert("ccdd000000000002", 2))
|
||||
addTestPacket(store, makeTestAdvert("eeff000000000003", 1))
|
||||
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
if len(caps) != 3 {
|
||||
t.Fatalf("expected 3 entries, got %d", len(caps))
|
||||
}
|
||||
|
||||
roleByName := map[string]string{}
|
||||
for _, c := range caps {
|
||||
roleByName[c.Name] = c.Role
|
||||
}
|
||||
if roleByName["Rep1"] != "repeater" {
|
||||
t.Errorf("Rep1 role: expected repeater, got %s", roleByName["Rep1"])
|
||||
}
|
||||
if roleByName["Comp1"] != "companion" {
|
||||
t.Errorf("Comp1 role: expected companion, got %s", roleByName["Comp1"])
|
||||
}
|
||||
if roleByName["Room1"] != "room_server" {
|
||||
t.Errorf("Room1 role: expected room_server, got %s", roleByName["Room1"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiByteCapability_AdopterEvidenceTakesPrecedence tests that when
|
||||
// adopter data shows hashSize >= 2 but path evidence says "suspected",
|
||||
// the node is upgraded to "confirmed" (Bug 3, #754).
|
||||
func TestMultiByteCapability_AdopterEvidenceTakesPrecedence(t *testing.T) {
|
||||
db := setupCapabilityTestDB(t)
|
||||
defer db.conn.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"aabbccdd11223344", "RepAdopter", "repeater", recentTS(24))
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
|
||||
// Only a path-based packet (no advert) — would normally be "suspected"
|
||||
pathByte := buildPathByte(2, 1)
|
||||
rawHex := "01" + pathByte + "aabb"
|
||||
pt := 1
|
||||
pkt := &StoreTx{
|
||||
RawHex: rawHex,
|
||||
PayloadType: &pt,
|
||||
PathJSON: `["aabb"]`,
|
||||
FirstSeen: recentTS(48),
|
||||
}
|
||||
addTestPacket(store, pkt)
|
||||
|
||||
// Without adopter data: should be suspected
|
||||
caps := store.computeMultiByteCapability(nil)
|
||||
capByName := map[string]MultiByteCapEntry{}
|
||||
for _, c := range caps {
|
||||
capByName[c.Name] = c
|
||||
}
|
||||
if capByName["RepAdopter"].Status != "suspected" {
|
||||
t.Errorf("without adopter data: expected suspected, got %s", capByName["RepAdopter"].Status)
|
||||
}
|
||||
|
||||
// With adopter data showing hashSize 2: should be confirmed
|
||||
adopterHS := map[string]int{"aabbccdd11223344": 2}
|
||||
caps = store.computeMultiByteCapability(adopterHS)
|
||||
capByName = map[string]MultiByteCapEntry{}
|
||||
for _, c := range caps {
|
||||
capByName[c.Name] = c
|
||||
}
|
||||
if capByName["RepAdopter"].Status != "confirmed" {
|
||||
t.Errorf("with adopter data: expected confirmed, got %s", capByName["RepAdopter"].Status)
|
||||
}
|
||||
if capByName["RepAdopter"].Evidence != "advert" {
|
||||
t.Errorf("with adopter data: expected advert evidence, got %s", capByName["RepAdopter"].Evidence)
|
||||
}
|
||||
}
|
||||
@@ -94,10 +94,6 @@ func (s *Server) getNeighborGraph() *NeighborGraph {
|
||||
|
||||
func (s *Server) handleNodeNeighbors(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := strings.ToLower(mux.Vars(r)["pubkey"])
|
||||
if s.cfg.IsBlacklisted(pubkey) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
|
||||
minCount := 1
|
||||
if v := r.URL.Query().Get("min_count"); v != "" {
|
||||
@@ -191,10 +187,6 @@ func (s *Server) handleNodeNeighbors(w http.ResponseWriter, r *http.Request) {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
// Defense-in-depth: deduplicate unresolved prefix entries that match
|
||||
// resolved pubkey entries in the same neighbor set (fixes #698).
|
||||
entries = dedupPrefixEntries(entries)
|
||||
|
||||
// Sort by score descending.
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
return entries[i].Score > entries[j].Score
|
||||
@@ -276,11 +268,6 @@ func (s *Server) handleNeighborGraph(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Filter blacklisted nodes from graph.
|
||||
if s.cfg != nil && (s.cfg.IsBlacklisted(e.NodeA) || s.cfg.IsBlacklisted(e.NodeB)) {
|
||||
continue
|
||||
}
|
||||
|
||||
ge := GraphEdge{
|
||||
Source: e.NodeA,
|
||||
Target: e.NodeB,
|
||||
@@ -382,97 +369,5 @@ func (s *Server) buildNodeInfoMap() map[string]nodeInfo {
|
||||
for _, n := range nodes {
|
||||
m[strings.ToLower(n.PublicKey)] = n
|
||||
}
|
||||
|
||||
// Enrich observer-only nodes: if an observer pubkey isn't already in the
|
||||
// map (i.e. it's not also a repeater/companion), add it with role "observer".
|
||||
if s.db != nil {
|
||||
rows, err := s.db.conn.Query("SELECT id, name FROM observers")
|
||||
if err == nil {
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var id, name string
|
||||
if rows.Scan(&id, &name) != nil {
|
||||
continue
|
||||
}
|
||||
key := strings.ToLower(id)
|
||||
if _, exists := m[key]; !exists {
|
||||
m[key] = nodeInfo{PublicKey: id, Name: name, Role: "observer"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// dedupPrefixEntries merges unresolved prefix entries with resolved pubkey entries
|
||||
// where the prefix is a prefix of the resolved pubkey. Defense-in-depth for #698.
|
||||
func dedupPrefixEntries(entries []NeighborEntry) []NeighborEntry {
|
||||
if len(entries) < 2 {
|
||||
return entries
|
||||
}
|
||||
|
||||
// Mark indices of unresolved entries to remove after merging.
|
||||
remove := make(map[int]bool)
|
||||
|
||||
for i := range entries {
|
||||
if entries[i].Pubkey != nil {
|
||||
continue // only check unresolved (no pubkey)
|
||||
}
|
||||
prefix := strings.ToLower(entries[i].Prefix)
|
||||
if prefix == "" {
|
||||
continue
|
||||
}
|
||||
// Find all resolved entries matching this prefix.
|
||||
matchIdx := -1
|
||||
matchCount := 0
|
||||
for j := range entries {
|
||||
if i == j || entries[j].Pubkey == nil {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(strings.ToLower(*entries[j].Pubkey), prefix) {
|
||||
matchIdx = j
|
||||
matchCount++
|
||||
}
|
||||
}
|
||||
// Only merge when exactly one resolved entry matches — ambiguous
|
||||
// prefixes that match multiple resolved neighbors must not be
|
||||
// arbitrarily assigned to one of them.
|
||||
if matchCount != 1 {
|
||||
continue
|
||||
}
|
||||
j := matchIdx
|
||||
|
||||
// Merge counts from unresolved into resolved.
|
||||
entries[j].Count += entries[i].Count
|
||||
|
||||
// Preserve higher LastSeen.
|
||||
if entries[i].LastSeen > entries[j].LastSeen {
|
||||
entries[j].LastSeen = entries[i].LastSeen
|
||||
}
|
||||
|
||||
// Merge observers.
|
||||
obsSet := make(map[string]bool)
|
||||
for _, o := range entries[j].Observers {
|
||||
obsSet[o] = true
|
||||
}
|
||||
for _, o := range entries[i].Observers {
|
||||
obsSet[o] = true
|
||||
}
|
||||
entries[j].Observers = observerList(obsSet)
|
||||
|
||||
remove[i] = true
|
||||
}
|
||||
|
||||
if len(remove) == 0 {
|
||||
return entries
|
||||
}
|
||||
|
||||
result := make([]NeighborEntry, 0, len(entries)-len(remove))
|
||||
for i, e := range entries {
|
||||
if !remove[i] {
|
||||
result = append(result, e)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -9,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// ─── Helpers ───────────────────────────────────────────────────────────────────
|
||||
@@ -459,69 +457,3 @@ func TestNeighborGraphAPI_ResponseShape(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Tests: buildNodeInfoMap observer enrichment (#753) ────────────────────────
|
||||
|
||||
func TestBuildNodeInfoMap_ObserverEnrichment(t *testing.T) {
|
||||
// Create a temp SQLite DB with nodes and observers tables.
|
||||
tmpDir := t.TempDir()
|
||||
dbPath := tmpDir + "/test.db"
|
||||
|
||||
conn, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Create tables
|
||||
for _, stmt := range []string{
|
||||
"CREATE TABLE nodes (public_key TEXT, name TEXT, role TEXT, lat REAL, lon REAL)",
|
||||
"CREATE TABLE observers (id TEXT, name TEXT)",
|
||||
"INSERT INTO nodes VALUES ('AAAA1111', 'Repeater-1', 'repeater', 0, 0)",
|
||||
"INSERT INTO observers VALUES ('BBBB2222', 'Observer-Alpha')",
|
||||
"INSERT INTO observers VALUES ('AAAA1111', 'Obs-also-repeater')",
|
||||
} {
|
||||
if _, err := conn.Exec(stmt); err != nil {
|
||||
t.Fatalf("exec %q: %v", stmt, err)
|
||||
}
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
// Open via our DB wrapper
|
||||
db, err := OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer db.conn.Close()
|
||||
|
||||
// Build a PacketStore with this DB (minimal — just need getCachedNodesAndPM)
|
||||
store := NewPacketStore(db, nil)
|
||||
store.Load()
|
||||
|
||||
srv := &Server{
|
||||
db: db,
|
||||
store: store,
|
||||
perfStats: NewPerfStats(),
|
||||
}
|
||||
|
||||
m := srv.buildNodeInfoMap()
|
||||
|
||||
// AAAA1111 should be from nodes table (repeater), NOT overwritten by observer
|
||||
if info, ok := m["aaaa1111"]; !ok {
|
||||
t.Error("expected aaaa1111 in map")
|
||||
} else if info.Role != "repeater" {
|
||||
t.Errorf("expected role=repeater for aaaa1111, got %q", info.Role)
|
||||
}
|
||||
|
||||
// BBBB2222 should be enriched from observers table
|
||||
if info, ok := m["bbbb2222"]; !ok {
|
||||
t.Error("expected bbbb2222 in map (observer-only node)")
|
||||
} else {
|
||||
if info.Role != "observer" {
|
||||
t.Errorf("expected role=observer for bbbb2222, got %q", info.Role)
|
||||
}
|
||||
if info.Name != "Observer-Alpha" {
|
||||
t.Errorf("expected name=Observer-Alpha for bbbb2222, got %q", info.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,527 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ─── Phase 1.5: resolveAmbiguousEdges tests ───────────────────────────────────
|
||||
|
||||
// Test 1: Ambiguous edge resolved after Phase 1.5 when geo proximity succeeds.
|
||||
func TestResolveAmbiguousEdges_GeoProximity(t *testing.T) {
|
||||
// Node A at lat=45, lon=-122. Candidate B1 at lat=45.1, lon=-122.1 (close).
|
||||
// Candidate B2 at lat=10, lon=10 (far away). Prefix "b0" matches both.
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
|
||||
nodeB1 := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "CloseNode", HasGPS: true, Lat: 45.1, Lon: -122.1}
|
||||
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffff", Name: "FarNode", HasGPS: true, Lat: 10.0, Lon: 10.0}
|
||||
|
||||
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB1, nodeB2})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
// Insert an ambiguous edge: NodeA ↔ prefix:b0
|
||||
pseudoB := "prefix:b0"
|
||||
key := makeEdgeKey("aaaa1111", pseudoB)
|
||||
graph.edges[key] = &NeighborEdge{
|
||||
NodeA: key.A,
|
||||
NodeB: "",
|
||||
Prefix: "b0",
|
||||
Count: 50,
|
||||
FirstSeen: now.Add(-1 * time.Hour),
|
||||
LastSeen: now,
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{"b0b1eeee", "b0c2ffff"},
|
||||
}
|
||||
graph.byNode["aaaa1111"] = append(graph.byNode["aaaa1111"], graph.edges[key])
|
||||
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
|
||||
// The ambiguous edge should be resolved to b0b1eeee (closest by geo).
|
||||
graph.mu.RLock()
|
||||
defer graph.mu.RUnlock()
|
||||
|
||||
if _, ok := graph.edges[key]; ok {
|
||||
t.Error("ambiguous edge should have been removed")
|
||||
}
|
||||
|
||||
resolvedKey := makeEdgeKey("aaaa1111", "b0b1eeee")
|
||||
e, ok := graph.edges[resolvedKey]
|
||||
if !ok {
|
||||
t.Fatal("resolved edge not found")
|
||||
}
|
||||
if e.Ambiguous {
|
||||
t.Error("resolved edge should not be ambiguous")
|
||||
}
|
||||
if e.Count != 50 {
|
||||
t.Errorf("expected count 50, got %d", e.Count)
|
||||
}
|
||||
}
|
||||
|
||||
// Test 2: Ambiguous edge merged with existing resolved edge (count accumulation).
|
||||
func TestResolveAmbiguousEdges_MergeWithExisting(t *testing.T) {
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
|
||||
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
|
||||
|
||||
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
// Existing resolved edge: NodeA ↔ NodeB with count=10.
|
||||
resolvedKey := makeEdgeKey("aaaa1111", "b0b1eeee")
|
||||
resolvedEdge := &NeighborEdge{
|
||||
NodeA: resolvedKey.A,
|
||||
NodeB: resolvedKey.B,
|
||||
Prefix: "b0b1",
|
||||
Count: 10,
|
||||
FirstSeen: now.Add(-2 * time.Hour),
|
||||
LastSeen: now.Add(-30 * time.Minute),
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
}
|
||||
graph.edges[resolvedKey] = resolvedEdge
|
||||
graph.byNode[resolvedKey.A] = append(graph.byNode[resolvedKey.A], resolvedEdge)
|
||||
graph.byNode[resolvedKey.B] = append(graph.byNode[resolvedKey.B], resolvedEdge)
|
||||
|
||||
// Ambiguous edge: NodeA ↔ prefix:b0 with count=207.
|
||||
pseudoB := "prefix:b0"
|
||||
ambigKey := makeEdgeKey("aaaa1111", pseudoB)
|
||||
ambigEdge := &NeighborEdge{
|
||||
NodeA: ambigKey.A,
|
||||
NodeB: "",
|
||||
Prefix: "b0",
|
||||
Count: 207,
|
||||
FirstSeen: now.Add(-3 * time.Hour),
|
||||
LastSeen: now, // more recent than resolved edge
|
||||
Observers: map[string]bool{"obs2": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{"b0b1eeee"},
|
||||
}
|
||||
graph.edges[ambigKey] = ambigEdge
|
||||
graph.byNode["aaaa1111"] = append(graph.byNode["aaaa1111"], ambigEdge)
|
||||
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
|
||||
graph.mu.RLock()
|
||||
defer graph.mu.RUnlock()
|
||||
|
||||
// Ambiguous edge should be gone.
|
||||
if _, ok := graph.edges[ambigKey]; ok {
|
||||
t.Error("ambiguous edge should have been removed")
|
||||
}
|
||||
|
||||
// Resolved edge should have merged counts.
|
||||
e := graph.edges[resolvedKey]
|
||||
if e == nil {
|
||||
t.Fatal("resolved edge not found")
|
||||
}
|
||||
if e.Count != 217 { // 10 + 207
|
||||
t.Errorf("expected merged count 217, got %d", e.Count)
|
||||
}
|
||||
// LastSeen should be the max of both.
|
||||
if !e.LastSeen.Equal(now) {
|
||||
t.Errorf("expected LastSeen to be %v, got %v", now, e.LastSeen)
|
||||
}
|
||||
// Both observers should be present.
|
||||
if !e.Observers["obs1"] || !e.Observers["obs2"] {
|
||||
t.Error("expected both observers to be present after merge")
|
||||
}
|
||||
}
|
||||
|
||||
// Test 3: Ambiguous edge left as-is when resolution fails.
|
||||
func TestResolveAmbiguousEdges_FailsNoChange(t *testing.T) {
|
||||
// Two candidates, neither has GPS, no affinity data — resolution falls through.
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"}
|
||||
nodeB1 := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "B1"}
|
||||
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffff", Name: "B2"}
|
||||
|
||||
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB1, nodeB2})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
pseudoB := "prefix:b0"
|
||||
key := makeEdgeKey("aaaa1111", pseudoB)
|
||||
graph.edges[key] = &NeighborEdge{
|
||||
NodeA: key.A,
|
||||
NodeB: "",
|
||||
Prefix: "b0",
|
||||
Count: 5,
|
||||
FirstSeen: now.Add(-1 * time.Hour),
|
||||
LastSeen: now,
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{"b0b1eeee", "b0c2ffff"},
|
||||
}
|
||||
graph.byNode["aaaa1111"] = append(graph.byNode["aaaa1111"], graph.edges[key])
|
||||
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
|
||||
graph.mu.RLock()
|
||||
defer graph.mu.RUnlock()
|
||||
|
||||
// Edge should still be ambiguous — resolution falls to first_match which
|
||||
// does resolve (it always picks something), but that's fine. Let's verify
|
||||
// if it resolved or stayed. Actually, resolveWithContext returns first_match
|
||||
// as fallback, so it WILL resolve. Let me adjust — the spec says "left as-is
|
||||
// when resolution fails." For resolveWithContext to truly fail, we need
|
||||
// no candidates at all in the prefix map.
|
||||
// Actually the spec says resolution fails = "no_match" confidence. That
|
||||
// only happens when pm.m has no entries for the prefix. With candidates
|
||||
// in pm, it always returns something. Let me test the true no-match case.
|
||||
}
|
||||
|
||||
// Test 3 (corrected): Resolution fails when prefix has no candidates in prefix map.
|
||||
func TestResolveAmbiguousEdges_NoMatch(t *testing.T) {
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"}
|
||||
// pm has no entries matching prefix "zz"
|
||||
pm := buildPrefixMap([]nodeInfo{nodeA})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
pseudoB := "prefix:zz"
|
||||
key := makeEdgeKey("aaaa1111", pseudoB)
|
||||
graph.edges[key] = &NeighborEdge{
|
||||
NodeA: key.A,
|
||||
NodeB: "",
|
||||
Prefix: "zz",
|
||||
Count: 5,
|
||||
FirstSeen: now.Add(-1 * time.Hour),
|
||||
LastSeen: now,
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{},
|
||||
}
|
||||
graph.byNode["aaaa1111"] = append(graph.byNode["aaaa1111"], graph.edges[key])
|
||||
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
|
||||
graph.mu.RLock()
|
||||
defer graph.mu.RUnlock()
|
||||
|
||||
// Edge should still exist and be ambiguous.
|
||||
e, ok := graph.edges[key]
|
||||
if !ok {
|
||||
t.Fatal("edge should still exist")
|
||||
}
|
||||
if !e.Ambiguous {
|
||||
t.Error("edge should still be ambiguous")
|
||||
}
|
||||
}
|
||||
|
||||
// Test 6: Phase 1 edge collection unchanged (no regression).
|
||||
func TestPhase1EdgeCollection_Unchanged(t *testing.T) {
|
||||
// Build a simple graph and verify non-ambiguous edges are not touched.
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
|
||||
nodeB := nodeInfo{Role: "repeater", PublicKey: "bbbb2222", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
|
||||
|
||||
ts := time.Now().UTC().Format(time.RFC3339)
|
||||
payloadType := 4
|
||||
obs := []*StoreObs{{
|
||||
ObserverID: "cccc3333",
|
||||
PathJSON: `["bbbb2222"]`,
|
||||
Timestamp: ts,
|
||||
}}
|
||||
tx := &StoreTx{
|
||||
ID: 1,
|
||||
PayloadType: &payloadType,
|
||||
DecodedJSON: `{"pubKey":"aaaa1111"}`,
|
||||
Observations: obs,
|
||||
}
|
||||
|
||||
store := ngTestStore([]nodeInfo{nodeA, nodeB, {Role: "repeater", PublicKey: "cccc3333", Name: "Observer"}}, []*StoreTx{tx})
|
||||
graph := BuildFromStore(store)
|
||||
|
||||
edges := graph.Neighbors("aaaa1111")
|
||||
found := false
|
||||
for _, e := range edges {
|
||||
if (e.NodeA == "aaaa1111" && e.NodeB == "bbbb2222") || (e.NodeA == "bbbb2222" && e.NodeB == "aaaa1111") {
|
||||
found = true
|
||||
if e.Ambiguous {
|
||||
t.Error("resolved edge should not be ambiguous")
|
||||
}
|
||||
if e.Count != 1 {
|
||||
t.Errorf("expected count 1, got %d", e.Count)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("expected resolved edge between aaaa1111 and bbbb2222")
|
||||
}
|
||||
}
|
||||
|
||||
// Test 7: Merge preserves higher LastSeen timestamp.
|
||||
func TestResolveAmbiguousEdges_PreservesHigherLastSeen(t *testing.T) {
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
|
||||
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
|
||||
pm := buildPrefixMap([]nodeInfo{nodeA, nodeB})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
later := time.Date(2026, 4, 10, 12, 0, 0, 0, time.UTC)
|
||||
earlier := time.Date(2026, 4, 9, 12, 0, 0, 0, time.UTC)
|
||||
|
||||
// Resolved edge has LATER LastSeen.
|
||||
resolvedKey := makeEdgeKey("aaaa1111", "b0b1eeee")
|
||||
re := &NeighborEdge{
|
||||
NodeA: resolvedKey.A, NodeB: resolvedKey.B,
|
||||
Count: 5, FirstSeen: earlier, LastSeen: later,
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
}
|
||||
graph.edges[resolvedKey] = re
|
||||
graph.byNode[resolvedKey.A] = append(graph.byNode[resolvedKey.A], re)
|
||||
graph.byNode[resolvedKey.B] = append(graph.byNode[resolvedKey.B], re)
|
||||
|
||||
// Ambiguous edge has EARLIER LastSeen.
|
||||
pseudoB := "prefix:b0"
|
||||
ambigKey := makeEdgeKey("aaaa1111", pseudoB)
|
||||
ae := &NeighborEdge{
|
||||
NodeA: ambigKey.A, NodeB: "",
|
||||
Prefix: "b0", Count: 100,
|
||||
FirstSeen: earlier.Add(-24 * time.Hour), LastSeen: earlier,
|
||||
Observers: map[string]bool{"obs2": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{"b0b1eeee"},
|
||||
}
|
||||
graph.edges[ambigKey] = ae
|
||||
graph.byNode["aaaa1111"] = append(graph.byNode["aaaa1111"], ae)
|
||||
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
|
||||
graph.mu.RLock()
|
||||
defer graph.mu.RUnlock()
|
||||
|
||||
e := graph.edges[resolvedKey]
|
||||
if e == nil {
|
||||
t.Fatal("resolved edge missing")
|
||||
}
|
||||
if !e.LastSeen.Equal(later) {
|
||||
t.Errorf("expected LastSeen=%v (higher), got %v", later, e.LastSeen)
|
||||
}
|
||||
if !e.FirstSeen.Equal(earlier.Add(-24 * time.Hour)) {
|
||||
t.Errorf("expected FirstSeen from ambiguous edge (earliest)")
|
||||
}
|
||||
}
|
||||
|
||||
// Test 5: Integration — node with both 1-byte and 2-byte prefix observations shows single entry.
|
||||
func TestIntegration_DualPrefixSingleNeighbor(t *testing.T) {
|
||||
nodeA := nodeInfo{Role: "repeater", PublicKey: "aaaa1111aaaa1111", Name: "NodeA", HasGPS: true, Lat: 45.0, Lon: -122.0}
|
||||
nodeB := nodeInfo{Role: "repeater", PublicKey: "b0b1eeeeb0b1eeee", Name: "NodeB", HasGPS: true, Lat: 45.1, Lon: -122.1}
|
||||
nodeB2 := nodeInfo{Role: "repeater", PublicKey: "b0c2ffffb0c2ffff", Name: "NodeB2", HasGPS: true, Lat: 10.0, Lon: 10.0}
|
||||
observer := nodeInfo{Role: "repeater", PublicKey: "cccc3333cccc3333", Name: "Observer"}
|
||||
|
||||
ts := time.Now().UTC().Format(time.RFC3339)
|
||||
pt := 4
|
||||
|
||||
// Observation 1: 1-byte prefix "b0" (ambiguous — matches both B and B2).
|
||||
obs1 := []*StoreObs{{ObserverID: "cccc3333cccc3333", PathJSON: `["b0"]`, Timestamp: ts}}
|
||||
tx1 := &StoreTx{ID: 1, PayloadType: &pt, DecodedJSON: `{"pubKey":"aaaa1111aaaa1111"}`, Observations: obs1}
|
||||
|
||||
// Observation 2: 4-byte prefix "b0b1" (unique — resolves to NodeB).
|
||||
obs2 := []*StoreObs{{ObserverID: "cccc3333cccc3333", PathJSON: `["b0b1"]`, Timestamp: ts}}
|
||||
tx2 := &StoreTx{ID: 2, PayloadType: &pt, DecodedJSON: `{"pubKey":"aaaa1111aaaa1111"}`, Observations: obs2}
|
||||
|
||||
store := ngTestStore([]nodeInfo{nodeA, nodeB, nodeB2, observer}, []*StoreTx{tx1, tx2})
|
||||
graph := BuildFromStore(store)
|
||||
|
||||
edges := graph.Neighbors("aaaa1111aaaa1111")
|
||||
|
||||
// Count non-observer edges that point to NodeB or are ambiguous with b0 prefix.
|
||||
resolvedToB := 0
|
||||
ambiguousB0 := 0
|
||||
for _, e := range edges {
|
||||
other := e.NodeA
|
||||
if strings.EqualFold(other, "aaaa1111aaaa1111") {
|
||||
other = e.NodeB
|
||||
}
|
||||
if strings.EqualFold(other, "b0b1eeeeb0b1eeee") {
|
||||
resolvedToB++
|
||||
}
|
||||
if e.Ambiguous && e.Prefix == "b0" {
|
||||
ambiguousB0++
|
||||
}
|
||||
}
|
||||
|
||||
if ambiguousB0 > 0 {
|
||||
t.Errorf("expected no ambiguous b0 edges after Phase 1.5, got %d", ambiguousB0)
|
||||
}
|
||||
if resolvedToB != 1 {
|
||||
t.Errorf("expected exactly 1 resolved edge to NodeB, got %d", resolvedToB)
|
||||
}
|
||||
}
|
||||
|
||||
// ─── API dedup tests ───────────────────────────────────────────────────────────
|
||||
|
||||
// Test 4: API dedup merges unresolved prefix with resolved pubkey in response.
|
||||
func TestDedupPrefixEntries_MergesUnresolved(t *testing.T) {
|
||||
pk := "b0b1eeeeb0b1eeee"
|
||||
name := "NodeB"
|
||||
entries := []NeighborEntry{
|
||||
{
|
||||
Pubkey: nil, // unresolved
|
||||
Prefix: "b0",
|
||||
Count: 207,
|
||||
LastSeen: "2026-04-10T12:00:00Z",
|
||||
Observers: []string{"obs1"},
|
||||
Ambiguous: true,
|
||||
},
|
||||
{
|
||||
Pubkey: &pk,
|
||||
Prefix: "b0b1",
|
||||
Name: &name,
|
||||
Count: 1,
|
||||
LastSeen: "2026-04-09T12:00:00Z",
|
||||
Observers: []string{"obs2"},
|
||||
},
|
||||
}
|
||||
|
||||
result := dedupPrefixEntries(entries)
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Fatalf("expected 1 entry after dedup, got %d", len(result))
|
||||
}
|
||||
if result[0].Pubkey == nil || *result[0].Pubkey != pk {
|
||||
t.Error("expected resolved entry to remain")
|
||||
}
|
||||
if result[0].Count != 208 { // 1 + 207
|
||||
t.Errorf("expected merged count 208, got %d", result[0].Count)
|
||||
}
|
||||
if result[0].LastSeen != "2026-04-10T12:00:00Z" {
|
||||
t.Errorf("expected higher LastSeen, got %s", result[0].LastSeen)
|
||||
}
|
||||
// Both observers should be present.
|
||||
obsMap := make(map[string]bool)
|
||||
for _, o := range result[0].Observers {
|
||||
obsMap[o] = true
|
||||
}
|
||||
if !obsMap["obs1"] || !obsMap["obs2"] {
|
||||
t.Error("expected both observers after merge")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDedupPrefixEntries_NoMatchNoChange(t *testing.T) {
|
||||
pk := "dddd4444"
|
||||
entries := []NeighborEntry{
|
||||
{Pubkey: nil, Prefix: "b0", Count: 5, Ambiguous: true, Observers: []string{}},
|
||||
{Pubkey: &pk, Prefix: "dd", Count: 10, Observers: []string{}},
|
||||
}
|
||||
result := dedupPrefixEntries(entries)
|
||||
if len(result) != 2 {
|
||||
t.Errorf("expected 2 entries (no match), got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Benchmark ─────────────────────────────────────────────────────────────────
|
||||
|
||||
// Test 8: Benchmark Phase 1.5 with 500+ ambiguous edges to verify <100ms.
|
||||
func BenchmarkResolveAmbiguousEdges_500(b *testing.B) {
|
||||
// Create 600 nodes and 500 ambiguous edges.
|
||||
var nodes []nodeInfo
|
||||
for i := 0; i < 600; i++ {
|
||||
pk := strings.ToLower(strings.Replace(
|
||||
strings.Replace(
|
||||
strings.Replace(
|
||||
"xxxx0000xxxx0000", "xxxx", string(rune('a'+i/26))+string(rune('a'+i%26)), 1),
|
||||
"0000", string(rune('0'+i/100))+string(rune('0'+(i/10)%10))+string(rune('0'+i%10))+"0", 1),
|
||||
"xxxx0000", string(rune('a'+i/26))+string(rune('a'+i%26))+"ff"+string(rune('0'+i/100))+string(rune('0'+(i/10)%10))+string(rune('0'+i%10))+"0ff", 1))
|
||||
// Use hex-safe pubkeys.
|
||||
pk = hexPK(i)
|
||||
nodes = append(nodes, nodeInfo{
|
||||
PublicKey: pk,
|
||||
Name: pk[:8],
|
||||
HasGPS: true,
|
||||
Lat: 45.0 + float64(i)*0.01,
|
||||
Lon: -122.0 + float64(i)*0.01,
|
||||
})
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
graph := NewNeighborGraph()
|
||||
// Create 500 ambiguous edges.
|
||||
for i := 0; i < 500; i++ {
|
||||
knownPK := nodes[0].PublicKey
|
||||
prefix := strings.ToLower(nodes[i+1].PublicKey[:2])
|
||||
pseudoB := "prefix:" + prefix
|
||||
key := makeEdgeKey(strings.ToLower(knownPK), pseudoB)
|
||||
graph.edges[key] = &NeighborEdge{
|
||||
NodeA: key.A,
|
||||
NodeB: "",
|
||||
Prefix: prefix,
|
||||
Count: 10,
|
||||
FirstSeen: time.Now(),
|
||||
LastSeen: time.Now(),
|
||||
Observers: map[string]bool{"obs": true},
|
||||
Ambiguous: true,
|
||||
Candidates: []string{strings.ToLower(nodes[i+1].PublicKey)},
|
||||
}
|
||||
graph.byNode[strings.ToLower(knownPK)] = append(
|
||||
graph.byNode[strings.ToLower(knownPK)], graph.edges[key])
|
||||
}
|
||||
resolveAmbiguousEdges(pm, graph)
|
||||
}
|
||||
}
|
||||
|
||||
// hexPK generates a deterministic 16-char hex pubkey for index i.
|
||||
func hexPK(i int) string {
|
||||
const hexChars = "0123456789abcdef"
|
||||
var b [16]byte
|
||||
v := i
|
||||
for j := 15; j >= 0; j-- {
|
||||
b[j] = hexChars[v%16]
|
||||
v /= 16
|
||||
}
|
||||
return string(b[:])
|
||||
}
|
||||
|
||||
// Test: API dedup does NOT merge when prefix matches multiple resolved entries.
|
||||
func TestDedupPrefixEntries_MultiMatchNoMerge(t *testing.T) {
|
||||
pk1 := "b0b1eeeeb0b1eeee"
|
||||
pk2 := "b0c2ffffb0c2ffff"
|
||||
name1 := "NodeB1"
|
||||
name2 := "NodeB2"
|
||||
entries := []NeighborEntry{
|
||||
{
|
||||
Pubkey: nil, // unresolved
|
||||
Prefix: "b0",
|
||||
Count: 100,
|
||||
LastSeen: "2026-04-10T12:00:00Z",
|
||||
Observers: []string{"obs1"},
|
||||
Ambiguous: true,
|
||||
},
|
||||
{
|
||||
Pubkey: &pk1,
|
||||
Prefix: "b0b1",
|
||||
Name: &name1,
|
||||
Count: 5,
|
||||
LastSeen: "2026-04-09T12:00:00Z",
|
||||
Observers: []string{"obs2"},
|
||||
},
|
||||
{
|
||||
Pubkey: &pk2,
|
||||
Prefix: "b0c2",
|
||||
Name: &name2,
|
||||
Count: 3,
|
||||
LastSeen: "2026-04-08T12:00:00Z",
|
||||
Observers: []string{"obs3"},
|
||||
},
|
||||
}
|
||||
|
||||
result := dedupPrefixEntries(entries)
|
||||
|
||||
if len(result) != 3 {
|
||||
t.Fatalf("expected 3 entries (no merge for ambiguous prefix), got %d", len(result))
|
||||
}
|
||||
// Counts should be unchanged.
|
||||
for _, e := range result {
|
||||
if e.Pubkey != nil && *e.Pubkey == pk1 && e.Count != 5 {
|
||||
t.Errorf("pk1 count should be unchanged at 5, got %d", e.Count)
|
||||
}
|
||||
if e.Pubkey != nil && *e.Pubkey == pk2 && e.Count != 3 {
|
||||
t.Errorf("pk2 count should be unchanged at 3, got %d", e.Count)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -166,7 +166,7 @@ func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {
|
||||
|
||||
// Phase 1: Extract edges from every transmission + observation.
|
||||
for _, tx := range packets {
|
||||
isAdvert := tx.PayloadType != nil && *tx.PayloadType == PayloadADVERT
|
||||
isAdvert := tx.PayloadType != nil && *tx.PayloadType == 4
|
||||
fromNode := extractFromNode(tx)
|
||||
// Pre-compute lowered originator once per tx (not per observation).
|
||||
fromLower := ""
|
||||
@@ -206,9 +206,6 @@ func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 1.5: Resolve ambiguous edges using full graph context.
|
||||
resolveAmbiguousEdges(pm, g)
|
||||
|
||||
// Phase 2: Disambiguation via Jaccard similarity.
|
||||
g.disambiguate()
|
||||
|
||||
@@ -346,71 +343,6 @@ func (g *NeighborGraph) upsertEdgeWithCandidates(knownPK, prefix string, candida
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Phase 1.5: Context-based resolution of ambiguous edges ────────────────────
|
||||
|
||||
// resolveAmbiguousEdges attempts to resolve ambiguous prefix edges using the
|
||||
// fully-built graph context. Called after Phase 1 (edge collection) completes
|
||||
// so that affinity and geo proximity tiers have full neighbor data.
|
||||
func resolveAmbiguousEdges(pm *prefixMap, graph *NeighborGraph) {
|
||||
// Step 1: Collect ambiguous edges under read lock.
|
||||
graph.mu.RLock()
|
||||
type ambiguousEntry struct {
|
||||
key edgeKey
|
||||
edge *NeighborEdge
|
||||
knownNode string
|
||||
prefix string
|
||||
}
|
||||
var ambiguous []ambiguousEntry
|
||||
for key, e := range graph.edges {
|
||||
if !e.Ambiguous {
|
||||
continue
|
||||
}
|
||||
knownNode := e.NodeA
|
||||
if strings.HasPrefix(e.NodeA, "prefix:") {
|
||||
knownNode = e.NodeB
|
||||
}
|
||||
if knownNode == "" {
|
||||
continue
|
||||
}
|
||||
ambiguous = append(ambiguous, ambiguousEntry{key, e, knownNode, e.Prefix})
|
||||
}
|
||||
graph.mu.RUnlock()
|
||||
|
||||
// Step 2: Resolve each (no lock needed — resolveWithContext takes its own RLock).
|
||||
type resolution struct {
|
||||
ambiguousEntry
|
||||
resolvedPK string
|
||||
}
|
||||
var resolutions []resolution
|
||||
for _, ae := range ambiguous {
|
||||
resolved, confidence, _ := pm.resolveWithContext(ae.prefix, []string{ae.knownNode}, graph)
|
||||
if resolved == nil || confidence == "no_match" || confidence == "first_match" || confidence == "gps_preference" {
|
||||
continue
|
||||
}
|
||||
rpk := strings.ToLower(resolved.PublicKey)
|
||||
if rpk == ae.knownNode {
|
||||
continue // self-edge guard
|
||||
}
|
||||
resolutions = append(resolutions, resolution{ae, rpk})
|
||||
}
|
||||
|
||||
// Step 3: Apply resolutions under write lock.
|
||||
if len(resolutions) == 0 {
|
||||
return
|
||||
}
|
||||
graph.mu.Lock()
|
||||
for _, r := range resolutions {
|
||||
// Verify edge still exists and is still ambiguous (could have been
|
||||
// resolved by a prior iteration if two ambiguous edges resolve to same target).
|
||||
e, ok := graph.edges[r.key]
|
||||
if !ok || !e.Ambiguous {
|
||||
continue
|
||||
}
|
||||
graph.resolveEdge(r.key, e, r.knownNode, r.resolvedPK)
|
||||
}
|
||||
graph.mu.Unlock()
|
||||
}
|
||||
|
||||
// ─── Disambiguation ────────────────────────────────────────────────────────────
|
||||
|
||||
// disambiguate resolves ambiguous edges using Jaccard similarity of neighbor sets.
|
||||
|
||||
@@ -86,9 +86,9 @@ func TestBuildNeighborGraph_EmptyStore(t *testing.T) {
|
||||
func TestBuildNeighborGraph_AdvertSingleHopPath(t *testing.T) {
|
||||
// ADVERT from X, path=["R1_prefix"] → edges: X↔R1 and Observer↔R1
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["r1aa"]`, nowStr, ngFloatPtr(-10)),
|
||||
@@ -132,10 +132,10 @@ func TestBuildNeighborGraph_AdvertSingleHopPath(t *testing.T) {
|
||||
func TestBuildNeighborGraph_AdvertMultiHopPath(t *testing.T) {
|
||||
// ADVERT from X, path=["R1","R2"] → X↔R1 and Observer↔R2
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
|
||||
@@ -170,8 +170,8 @@ func TestBuildNeighborGraph_AdvertMultiHopPath(t *testing.T) {
|
||||
func TestBuildNeighborGraph_AdvertZeroHop(t *testing.T) {
|
||||
// ADVERT from X, path=[] → X↔Observer direct edge
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `[]`, nowStr, nil),
|
||||
@@ -195,8 +195,8 @@ func TestBuildNeighborGraph_AdvertZeroHop(t *testing.T) {
|
||||
func TestBuildNeighborGraph_NonAdvertEmptyPath(t *testing.T) {
|
||||
// Non-ADVERT, path=[] → no edges
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `[]`, nowStr, nil),
|
||||
@@ -212,10 +212,10 @@ func TestBuildNeighborGraph_NonAdvertEmptyPath(t *testing.T) {
|
||||
func TestBuildNeighborGraph_NonAdvertOnlyObserverEdge(t *testing.T) {
|
||||
// Non-ADVERT with path=["R1","R2"] → only Observer↔R2, NO originator edge
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
|
||||
@@ -236,9 +236,9 @@ func TestBuildNeighborGraph_NonAdvertOnlyObserverEdge(t *testing.T) {
|
||||
func TestBuildNeighborGraph_NonAdvertSingleHop(t *testing.T) {
|
||||
// Non-ADVERT with path=["R1"] → Observer↔R1 only
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["r1aa"]`, nowStr, nil),
|
||||
@@ -259,10 +259,10 @@ func TestBuildNeighborGraph_NonAdvertSingleHop(t *testing.T) {
|
||||
func TestBuildNeighborGraph_HashCollision(t *testing.T) {
|
||||
// Two nodes share prefix "a3" → ambiguous edge
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "a3bb1111", Name: "CandidateA"},
|
||||
{Role: "repeater", PublicKey: "a3bb2222", Name: "CandidateB"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "a3bb1111", Name: "CandidateA"},
|
||||
{PublicKey: "a3bb2222", Name: "CandidateB"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["a3bb"]`, nowStr, nil),
|
||||
@@ -308,13 +308,13 @@ func TestBuildNeighborGraph_ConfidenceAutoResolve(t *testing.T) {
|
||||
// CandidateB has no known neighbors (Jaccard = 0).
|
||||
// An ambiguous edge X↔prefix "a3" with candidates [A, B] should auto-resolve to A.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "n1111111", Name: "N1"},
|
||||
{Role: "repeater", PublicKey: "n2222222", Name: "N2"},
|
||||
{Role: "repeater", PublicKey: "n3333333", Name: "N3"},
|
||||
{Role: "repeater", PublicKey: "a3001111", Name: "CandidateA"},
|
||||
{Role: "repeater", PublicKey: "a3002222", Name: "CandidateB"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "n1111111", Name: "N1"},
|
||||
{PublicKey: "n2222222", Name: "N2"},
|
||||
{PublicKey: "n3333333", Name: "N3"},
|
||||
{PublicKey: "a3001111", Name: "CandidateA"},
|
||||
{PublicKey: "a3002222", Name: "CandidateB"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
|
||||
// Create resolved edges: X↔N1, X↔N2, X↔N3, A↔N1, A↔N2, A↔N3
|
||||
@@ -373,11 +373,11 @@ func TestBuildNeighborGraph_ConfidenceAutoResolve(t *testing.T) {
|
||||
func TestBuildNeighborGraph_EqualScoresAmbiguous(t *testing.T) {
|
||||
// Two candidates with identical neighbor sets → should NOT auto-resolve.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "n1111111", Name: "N1"},
|
||||
{Role: "repeater", PublicKey: "a3001111", Name: "CandidateA"},
|
||||
{Role: "repeater", PublicKey: "a3002222", Name: "CandidateB"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "n1111111", Name: "N1"},
|
||||
{PublicKey: "a3001111", Name: "CandidateA"},
|
||||
{PublicKey: "a3002222", Name: "CandidateB"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
|
||||
var txs []*StoreTx
|
||||
@@ -425,8 +425,8 @@ func TestBuildNeighborGraph_EqualScoresAmbiguous(t *testing.T) {
|
||||
func TestBuildNeighborGraph_ObserverSelfEdgeGuard(t *testing.T) {
|
||||
// Observer's own prefix in path → should NOT create self-edge.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["obs0"]`, nowStr, nil),
|
||||
@@ -445,8 +445,8 @@ func TestBuildNeighborGraph_ObserverSelfEdgeGuard(t *testing.T) {
|
||||
func TestBuildNeighborGraph_OrphanPrefix(t *testing.T) {
|
||||
// Path contains prefix matching zero nodes → edge recorded as unresolved.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["ff99"]`, nowStr, nil),
|
||||
@@ -506,9 +506,9 @@ func TestAffinityScore_StaleAndLow(t *testing.T) {
|
||||
|
||||
func TestBuildNeighborGraph_CountAccumulation(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
|
||||
var txs []*StoreTx
|
||||
@@ -535,10 +535,10 @@ func TestBuildNeighborGraph_CountAccumulation(t *testing.T) {
|
||||
|
||||
func TestBuildNeighborGraph_MultipleObservers(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Obs1"},
|
||||
{Role: "repeater", PublicKey: "obs00002", Name: "Obs2"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "obs00001", Name: "Obs1"},
|
||||
{PublicKey: "obs00002", Name: "Obs2"},
|
||||
}
|
||||
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
@@ -565,9 +565,9 @@ func TestBuildNeighborGraph_MultipleObservers(t *testing.T) {
|
||||
|
||||
func TestBuildNeighborGraph_TimeDecayOldObservations(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
|
||||
tx := ngMakeTx(1, 4, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
@@ -592,10 +592,10 @@ func TestBuildNeighborGraph_TimeDecayOldObservations(t *testing.T) {
|
||||
func TestBuildNeighborGraph_ADVERTOnlyConstraint(t *testing.T) {
|
||||
// Non-ADVERT: should NOT create originator↔path[0] edge, only observer↔path[last].
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{Role: "repeater", PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{Role: "repeater", PublicKey: "obs00001", Name: "Observer"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeX"},
|
||||
{PublicKey: "r1aabbcc", Name: "R1"},
|
||||
{PublicKey: "r2ddeeff", Name: "R2"},
|
||||
{PublicKey: "obs00001", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 2, ngFromNodeJSON("aaaa1111"), []*StoreObs{
|
||||
ngMakeObs("obs00001", `["r1aa","r2dd"]`, nowStr, nil),
|
||||
@@ -631,9 +631,9 @@ func ngPubKeyJSON(pubkey string) string {
|
||||
func TestBuildNeighborGraph_AdvertPubKeyField(t *testing.T) {
|
||||
// Real ADVERTs use "pubKey", not "from_node". Verify the builder handles it.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234", Name: "Originator"},
|
||||
{Role: "repeater", PublicKey: "r1aabbccdd001122334455667788990011223344556677889900112233445566", Name: "R1"},
|
||||
{Role: "repeater", PublicKey: "obs0000100112233445566778899001122334455667788990011223344556677", Name: "Observer"},
|
||||
{PublicKey: "99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234", Name: "Originator"},
|
||||
{PublicKey: "r1aabbccdd001122334455667788990011223344556677889900112233445566", Name: "R1"},
|
||||
{PublicKey: "obs0000100112233445566778899001122334455667788990011223344556677", Name: "Observer"},
|
||||
}
|
||||
tx := ngMakeTx(1, 4, ngPubKeyJSON("99bf37abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234"), []*StoreObs{
|
||||
ngMakeObs("obs0000100112233445566778899001122334455667788990011223344556677", `["r1"]`, nowStr, ngFloatPtr(-8.5)),
|
||||
@@ -666,10 +666,10 @@ func TestBuildNeighborGraph_OneByteHashPrefixes(t *testing.T) {
|
||||
// Real-world scenario: 1-byte hash prefixes with multiple candidates.
|
||||
// Should create edges (possibly ambiguous) rather than empty graph.
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "c0dedad400000000000000000000000000000000000000000000000000000001", Name: "NodeC0-1"},
|
||||
{Role: "repeater", PublicKey: "c0dedad900000000000000000000000000000000000000000000000000000002", Name: "NodeC0-2"},
|
||||
{Role: "repeater", PublicKey: "a3bbccdd00000000000000000000000000000000000000000000000000000003", Name: "Originator"},
|
||||
{Role: "repeater", PublicKey: "obs1234500000000000000000000000000000000000000000000000000000004", Name: "Observer"},
|
||||
{PublicKey: "c0dedad400000000000000000000000000000000000000000000000000000001", Name: "NodeC0-1"},
|
||||
{PublicKey: "c0dedad900000000000000000000000000000000000000000000000000000002", Name: "NodeC0-2"},
|
||||
{PublicKey: "a3bbccdd00000000000000000000000000000000000000000000000000000003", Name: "Originator"},
|
||||
{PublicKey: "obs1234500000000000000000000000000000000000000000000000000000004", Name: "Observer"},
|
||||
}
|
||||
// ADVERT from Originator with 1-byte path hop "c0"
|
||||
tx := ngMakeTx(1, 4, ngPubKeyJSON("a3bbccdd00000000000000000000000000000000000000000000000000000003"), []*StoreObs{
|
||||
@@ -809,10 +809,10 @@ func TestExtractFromNode_UsesCachedParse(t *testing.T) {
|
||||
func BenchmarkBuildFromStore(b *testing.B) {
|
||||
// Simulate a dataset with many packets and repeated pubkeys
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aaaa1111", Name: "NodeA"},
|
||||
{Role: "repeater", PublicKey: "bbbb2222", Name: "NodeB"},
|
||||
{Role: "repeater", PublicKey: "cccc3333", Name: "NodeC"},
|
||||
{Role: "repeater", PublicKey: "dddd4444", Name: "NodeD"},
|
||||
{PublicKey: "aaaa1111", Name: "NodeA"},
|
||||
{PublicKey: "bbbb2222", Name: "NodeB"},
|
||||
{PublicKey: "cccc3333", Name: "NodeC"},
|
||||
{PublicKey: "dddd4444", Name: "NodeD"},
|
||||
}
|
||||
const numPackets = 1000
|
||||
packets := make([]*StoreTx, 0, numPackets)
|
||||
|
||||
@@ -381,13 +381,7 @@ func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int
|
||||
}
|
||||
}
|
||||
for _, obs := range tx.Observations {
|
||||
// Check if this observation has been resolved: look up in the index.
|
||||
// If the tx has no reverse-map entries AND path is non-empty, it needs backfill.
|
||||
hasRP := false
|
||||
if _, ok := store.resolvedPubkeyReverse[tx.ID]; ok {
|
||||
hasRP = true
|
||||
}
|
||||
if !hasRP && obs.PathJSON != "" && obs.PathJSON != "[]" {
|
||||
if obs.ResolvedPath == nil && obs.PathJSON != "" && obs.PathJSON != "[]" {
|
||||
allPending = append(allPending, obsRef{
|
||||
obsID: obs.ID,
|
||||
pathJSON: obs.PathJSON,
|
||||
@@ -488,61 +482,24 @@ func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int
|
||||
}
|
||||
}
|
||||
|
||||
// Update in-memory state: update resolved pubkey index, re-pick best observation,
|
||||
// and invalidate LRU cache entries for backfilled observations (#800).
|
||||
//
|
||||
// Lock ordering: always take s.mu BEFORE lruMu. The read path
|
||||
// (fetchResolvedPathForObs) takes lruMu independently of s.mu,
|
||||
// so we must NOT hold s.mu while taking lruMu. Instead, collect
|
||||
// obsIDs to invalidate under s.mu, release it, then take lruMu.
|
||||
// Update in-memory state and re-pick best observation under a single
|
||||
// write lock. The per-tx pickBestObservation is O(observations) which is
|
||||
// typically <10 per tx — negligible cost vs. the race risk of splitting
|
||||
// the lock (pollAndMerge can append to tx.Observations concurrently).
|
||||
store.mu.Lock()
|
||||
affectedSet := make(map[string]bool)
|
||||
lruInvalidate := make([]int, 0, len(results))
|
||||
for _, r := range results {
|
||||
// Remove old index entries for this tx, then re-add with new pubkeys
|
||||
if obs, ok := store.byObsID[r.obsID]; ok {
|
||||
obs.ResolvedPath = r.rp
|
||||
}
|
||||
if !affectedSet[r.txHash] {
|
||||
affectedSet[r.txHash] = true
|
||||
if tx, ok := store.byHash[r.txHash]; ok {
|
||||
store.removeFromResolvedPubkeyIndex(tx.ID)
|
||||
pickBestObservation(tx)
|
||||
}
|
||||
}
|
||||
// Add new resolved pubkeys to index
|
||||
if tx, ok := store.byHash[r.txHash]; ok {
|
||||
pks := extractResolvedPubkeys(r.rp)
|
||||
store.addToResolvedPubkeyIndex(tx.ID, pks)
|
||||
// Update byNode for relay nodes
|
||||
for _, pk := range pks {
|
||||
store.addToByNode(tx, pk)
|
||||
}
|
||||
// Update byPathHop resolved-key entries
|
||||
hopsSeen := make(map[string]bool)
|
||||
for _, hop := range txGetParsedPath(tx) {
|
||||
hopsSeen[strings.ToLower(hop)] = true
|
||||
}
|
||||
for _, pk := range pks {
|
||||
if !hopsSeen[pk] {
|
||||
hopsSeen[pk] = true
|
||||
store.byPathHop[pk] = append(store.byPathHop[pk], tx)
|
||||
}
|
||||
}
|
||||
}
|
||||
lruInvalidate = append(lruInvalidate, r.obsID)
|
||||
}
|
||||
// Re-pick best observation for affected transmissions
|
||||
for txHash := range affectedSet {
|
||||
if tx, ok := store.byHash[txHash]; ok {
|
||||
pickBestObservation(tx)
|
||||
}
|
||||
}
|
||||
store.mu.Unlock()
|
||||
|
||||
// Invalidate LRU entries AFTER releasing s.mu to maintain lock
|
||||
// ordering (lruMu must never be taken while s.mu is held).
|
||||
store.lruMu.Lock()
|
||||
for _, obsID := range lruInvalidate {
|
||||
store.lruDelete(obsID)
|
||||
}
|
||||
store.lruMu.Unlock()
|
||||
}
|
||||
|
||||
totalProcessed += len(chunk)
|
||||
@@ -568,7 +525,7 @@ type edgeCandidate struct {
|
||||
// For ADVERTs: originator↔path[0] (if unambiguous). For ALL types: observer↔path[last] (if unambiguous).
|
||||
// Also handles zero-hop ADVERTs (originator↔observer direct link).
|
||||
func extractEdgesFromObs(obs *StoreObs, tx *StoreTx, pm *prefixMap) []edgeCandidate {
|
||||
isAdvert := tx.PayloadType != nil && *tx.PayloadType == PayloadADVERT
|
||||
isAdvert := tx.PayloadType != nil && *tx.PayloadType == 4
|
||||
fromNode := extractFromNode(tx)
|
||||
path := parsePathJSON(obs.PathJSON)
|
||||
observerPK := strings.ToLower(obs.ObserverID)
|
||||
@@ -627,18 +584,12 @@ func extractEdgesFromObs(obs *StoreObs, tx *StoreTx, pm *prefixMap) []edgeCandid
|
||||
|
||||
// openRW opens a read-write SQLite connection (same pattern as PruneOldPackets).
|
||||
func openRW(dbPath string) (*sql.DB, error) {
|
||||
dsn := fmt.Sprintf("file:%s?_journal_mode=WAL", dbPath)
|
||||
dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=10000", dbPath)
|
||||
rw, err := sql.Open("sqlite", dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rw.SetMaxOpenConns(1)
|
||||
// DSN _busy_timeout may not be honored by all drivers; set via PRAGMA
|
||||
// to guarantee SQLite retries for up to 5s before returning SQLITE_BUSY.
|
||||
if _, err := rw.Exec("PRAGMA busy_timeout = 5000"); err != nil {
|
||||
rw.Close()
|
||||
return nil, fmt.Errorf("set busy_timeout: %w", err)
|
||||
}
|
||||
return rw, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ func createTestDBWithSchema(t *testing.T) (*DB, string) {
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
raw_hex TEXT, hash TEXT UNIQUE, first_seen TEXT,
|
||||
route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
|
||||
decoded_json TEXT, channel_hash TEXT DEFAULT NULL
|
||||
decoded_json TEXT
|
||||
)`)
|
||||
conn.Exec(`CREATE TABLE observers (
|
||||
id TEXT PRIMARY KEY, name TEXT, iata TEXT
|
||||
@@ -38,7 +38,7 @@ func createTestDBWithSchema(t *testing.T) (*DB, string) {
|
||||
observer_id TEXT, observer_name TEXT, direction TEXT,
|
||||
snr REAL, rssi REAL, score INTEGER,
|
||||
path_json TEXT, timestamp TEXT,
|
||||
resolved_path TEXT, raw_hex TEXT
|
||||
resolved_path TEXT
|
||||
)`)
|
||||
conn.Exec(`CREATE TABLE nodes (
|
||||
public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
|
||||
@@ -58,8 +58,8 @@ func createTestDBWithSchema(t *testing.T) (*DB, string) {
|
||||
func TestResolvePathForObs(t *testing.T) {
|
||||
// Build a prefix map with known nodes
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{Role: "repeater", PublicKey: "bbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-BB"},
|
||||
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{PublicKey: "bbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-BB"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
graph := NewNeighborGraph()
|
||||
@@ -97,7 +97,7 @@ func TestResolvePathForObs_EmptyPath(t *testing.T) {
|
||||
|
||||
func TestResolvePathForObs_Unresolvable(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
@@ -203,14 +203,14 @@ func TestLoadNeighborEdgesFromDB(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStoreObsResolvedPathInBroadcast(t *testing.T) {
|
||||
// After #800 refactor, resolved_path is no longer stored on StoreTx/StoreObs structs.
|
||||
// Broadcast maps carry resolved_path from the decode-window, not from struct fields.
|
||||
// This test verifies pickBestObservation no longer sets ResolvedPath on tx.
|
||||
// Verify resolved_path appears in broadcast maps
|
||||
pk := "aabbccdd"
|
||||
obs := &StoreObs{
|
||||
ID: 1,
|
||||
ObserverID: "obs1",
|
||||
ObserverName: "Observer 1",
|
||||
PathJSON: `["aa"]`,
|
||||
ResolvedPath: []*string{&pk},
|
||||
Timestamp: "2024-01-01T00:00:00Z",
|
||||
}
|
||||
|
||||
@@ -221,26 +221,32 @@ func TestStoreObsResolvedPathInBroadcast(t *testing.T) {
|
||||
}
|
||||
pickBestObservation(tx)
|
||||
|
||||
// tx should NOT have a ResolvedPath field anymore (compile-time guard)
|
||||
// Verify the best observation's fields are propagated correctly
|
||||
if tx.ObserverID != "obs1" {
|
||||
t.Errorf("expected ObserverID=obs1, got %s", tx.ObserverID)
|
||||
if tx.ResolvedPath == nil {
|
||||
t.Fatal("expected ResolvedPath to be set on tx after pickBestObservation")
|
||||
}
|
||||
if *tx.ResolvedPath[0] != "aabbccdd" {
|
||||
t.Errorf("expected resolved path to be aabbccdd, got %s", *tx.ResolvedPath[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvedPathInTxToMap(t *testing.T) {
|
||||
// After #800, txToMap no longer includes resolved_path from the struct.
|
||||
// resolved_path is only available via on-demand SQL fetch (txToMapWithRP).
|
||||
pk := "aabbccdd"
|
||||
tx := &StoreTx{
|
||||
ID: 1,
|
||||
Hash: "abc123",
|
||||
PathJSON: `["aa"]`,
|
||||
obsKeys: make(map[string]bool),
|
||||
ID: 1,
|
||||
Hash: "abc123",
|
||||
PathJSON: `["aa"]`,
|
||||
ResolvedPath: []*string{&pk},
|
||||
obsKeys: make(map[string]bool),
|
||||
}
|
||||
|
||||
m := txToMap(tx)
|
||||
if _, ok := m["resolved_path"]; ok {
|
||||
t.Error("resolved_path should not be in txToMap output (removed in #800)")
|
||||
rp, ok := m["resolved_path"]
|
||||
if !ok {
|
||||
t.Fatal("resolved_path not in txToMap output")
|
||||
}
|
||||
rpSlice, ok := rp.([]*string)
|
||||
if !ok || len(rpSlice) != 1 || *rpSlice[0] != "aabbccdd" {
|
||||
t.Errorf("unexpected resolved_path: %v", rp)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -264,7 +270,7 @@ func TestEnsureResolvedPathColumn(t *testing.T) {
|
||||
conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
|
||||
conn.Exec(`CREATE TABLE observations (
|
||||
id INTEGER PRIMARY KEY, transmission_id INTEGER,
|
||||
observer_id TEXT, path_json TEXT, timestamp TEXT, raw_hex TEXT
|
||||
observer_id TEXT, path_json TEXT, timestamp TEXT
|
||||
)`)
|
||||
conn.Close()
|
||||
|
||||
@@ -359,21 +365,27 @@ func TestLoadWithResolvedPath(t *testing.T) {
|
||||
t.Fatalf("expected 1 observation, got %d", len(tx.Observations))
|
||||
}
|
||||
|
||||
// After #800, ResolvedPath is not stored on StoreObs struct.
|
||||
// Instead, resolved pubkeys are in the membership index.
|
||||
_ = tx.Observations[0] // obs exists
|
||||
h := resolvedPubkeyHash("aabbccdd")
|
||||
if len(store.resolvedPubkeyIndex[h]) != 1 {
|
||||
t.Fatal("expected resolved pubkey to be indexed")
|
||||
obs := tx.Observations[0]
|
||||
if obs.ResolvedPath == nil {
|
||||
t.Fatal("expected ResolvedPath to be loaded")
|
||||
}
|
||||
if len(obs.ResolvedPath) != 1 || *obs.ResolvedPath[0] != "aabbccdd" {
|
||||
t.Errorf("unexpected ResolvedPath: %v", obs.ResolvedPath)
|
||||
}
|
||||
|
||||
// Check that pickBestObservation propagated resolved_path to tx
|
||||
if tx.ResolvedPath == nil || len(tx.ResolvedPath) != 1 {
|
||||
t.Error("expected ResolvedPath to be propagated to tx")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvedPathInAPIResponse(t *testing.T) {
|
||||
// After #800, TransmissionResp no longer has ResolvedPath field.
|
||||
// resolved_path is included dynamically in map-based API responses.
|
||||
// Test that TransmissionResp properly marshals resolved_path
|
||||
pk := "aabbccddee"
|
||||
resp := TransmissionResp{
|
||||
ID: 1,
|
||||
Hash: "test",
|
||||
ID: 1,
|
||||
Hash: "test",
|
||||
ResolvedPath: []*string{&pk, nil},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
@@ -384,9 +396,19 @@ func TestResolvedPathInAPIResponse(t *testing.T) {
|
||||
var m map[string]interface{}
|
||||
json.Unmarshal(data, &m)
|
||||
|
||||
// resolved_path should NOT be in the marshaled JSON
|
||||
if _, ok := m["resolved_path"]; ok {
|
||||
t.Error("resolved_path should not be in TransmissionResp JSON (#800)")
|
||||
rp, ok := m["resolved_path"]
|
||||
if !ok {
|
||||
t.Fatal("resolved_path missing from JSON")
|
||||
}
|
||||
rpArr, ok := rp.([]interface{})
|
||||
if !ok || len(rpArr) != 2 {
|
||||
t.Fatalf("unexpected resolved_path shape: %v", rp)
|
||||
}
|
||||
if rpArr[0] != "aabbccddee" {
|
||||
t.Errorf("first element wrong: %v", rpArr[0])
|
||||
}
|
||||
if rpArr[1] != nil {
|
||||
t.Errorf("second element should be null: %v", rpArr[1])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -437,8 +459,8 @@ func TestExtractEdgesFromObs_NonAdvertNoPath(t *testing.T) {
|
||||
|
||||
func TestExtractEdgesFromObs_WithPath(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{Role: "repeater", PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{Role: "repeater", PublicKey: "ffgghhii1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-FF"},
|
||||
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{PublicKey: "ffgghhii1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-FF"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
@@ -510,31 +532,3 @@ func TestPersistSemaphoreTryAcquireSkipsBatch(t *testing.T) {
|
||||
|
||||
<-persistSem // release
|
||||
}
|
||||
|
||||
func TestOpenRW_BusyTimeout(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "test.db")
|
||||
|
||||
// Create the DB file first
|
||||
db, err := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.Exec("CREATE TABLE dummy (id INTEGER)")
|
||||
db.Close()
|
||||
|
||||
// Open via openRW and verify busy_timeout is set
|
||||
rw, err := openRW(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("openRW failed: %v", err)
|
||||
}
|
||||
defer rw.Close()
|
||||
|
||||
var timeout int
|
||||
if err := rw.QueryRow("PRAGMA busy_timeout").Scan(&timeout); err != nil {
|
||||
t.Fatalf("query busy_timeout: %v", err)
|
||||
}
|
||||
if timeout != 5000 {
|
||||
t.Errorf("expected busy_timeout=5000, got %d", timeout)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,311 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func TestConfigIsBlacklisted(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"AA", "BB", "cc"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pubkey string
|
||||
want bool
|
||||
}{
|
||||
{"AA", true},
|
||||
{"aa", true}, // case-insensitive
|
||||
{"BB", true},
|
||||
{"CC", true}, // lowercase "cc" matches uppercase
|
||||
{"DD", false},
|
||||
{"", false},
|
||||
{"AAB", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
got := cfg.IsBlacklisted(tt.pubkey)
|
||||
if got != tt.want {
|
||||
t.Errorf("IsBlacklisted(%q) = %v, want %v", tt.pubkey, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigIsBlacklistedEmpty(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
if cfg.IsBlacklisted("anything") {
|
||||
t.Error("empty blacklist should not match anything")
|
||||
}
|
||||
if cfg.IsBlacklisted("") {
|
||||
t.Error("empty blacklist should not match empty string")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigBlacklistWhitespace(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{" AA ", "BB"},
|
||||
}
|
||||
if !cfg.IsBlacklisted("AA") {
|
||||
t.Error("trimmed key should match")
|
||||
}
|
||||
if !cfg.IsBlacklisted(" AA ") {
|
||||
t.Error("whitespace-padded key should match after trimming")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigBlacklistEmptyEntries(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"", " ", "AA"},
|
||||
}
|
||||
if !cfg.IsBlacklisted("AA") {
|
||||
t.Error("non-empty entry should match")
|
||||
}
|
||||
if cfg.IsBlacklisted("") {
|
||||
t.Error("empty blacklist entry should not match empty pubkey")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistFiltersHandleNodes(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('goodnode', 'GoodNode', 'companion', datetime('now'))")
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('badnode', 'BadNode', 'companion', datetime('now'))")
|
||||
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes?limit=50", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp NodeListResponse
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
for _, node := range resp.Nodes {
|
||||
if pk, _ := node["public_key"].(string); pk == "badnode" {
|
||||
t.Error("blacklisted node should not appear in nodes list")
|
||||
}
|
||||
}
|
||||
if resp.Total == 0 {
|
||||
t.Error("expected at least one non-blacklisted node")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistFiltersNodeDetail(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('badnode', 'BadNode', 'companion', datetime('now'))")
|
||||
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes/badnode", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("expected 404 for blacklisted node, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistFiltersNodeSearch(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('badnode', 'TrollNode', 'companion', datetime('now'))")
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('goodnode', 'GoodNode', 'companion', datetime('now'))")
|
||||
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes/search?q=Troll", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
var resp NodeSearchResponse
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
for _, node := range resp.Nodes {
|
||||
if pk, _ := node["public_key"].(string); pk == "badnode" {
|
||||
t.Error("blacklisted node should not appear in search results")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoBlacklistPassesAll(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('somenode', 'SomeNode', 'companion', datetime('now'))")
|
||||
|
||||
cfg := &Config{}
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes?limit=50", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
var resp NodeListResponse
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
if resp.Total == 0 {
|
||||
t.Error("without blacklist, node should appear")
|
||||
}
|
||||
}
|
||||
|
||||
// setupTestRouter creates a mux.Router and registers server routes.
|
||||
func setupTestRouter(srv *Server) *mux.Router {
|
||||
r := mux.NewRouter()
|
||||
srv.RegisterRoutes(r)
|
||||
srv.router = r
|
||||
return r
|
||||
}
|
||||
func TestBlacklistFiltersNeighborGraph(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
db := setupTestDB(t)
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/analytics/neighbor-graph", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
// Check edges don't contain blacklisted node
|
||||
if edges, ok := resp["edges"].([]interface{}); ok {
|
||||
for _, e := range edges {
|
||||
if edge, ok := e.(map[string]interface{}); ok {
|
||||
if src, _ := edge["source"].(string); src == "badnode" {
|
||||
t.Error("blacklisted node should not appear as edge source in neighbor graph")
|
||||
}
|
||||
if tgt, _ := edge["target"].(string); tgt == "badnode" {
|
||||
t.Error("blacklisted node should not appear as edge target in neighbor graph")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check nodes list doesn't contain blacklisted node
|
||||
if nodes, ok := resp["nodes"].([]interface{}); ok {
|
||||
for _, n := range nodes {
|
||||
if node, ok := n.(map[string]interface{}); ok {
|
||||
if pk, _ := node["pubkey"].(string); pk == "badnode" {
|
||||
t.Error("blacklisted node should not appear in neighbor graph nodes")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistFiltersResolveHops(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen) VALUES ('badnode', 'BadNode', 'companion', datetime('now'))")
|
||||
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=badnode", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp ResolveHopsResponse
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
if hr, ok := resp.Resolved["badnode"]; ok {
|
||||
for _, c := range hr.Candidates {
|
||||
if c.Pubkey == "badnode" {
|
||||
t.Error("blacklisted node should not appear as resolve-hops candidate")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistFiltersSubpathDetail(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"badnode"},
|
||||
}
|
||||
db := setupTestDB(t)
|
||||
srv := NewServer(db, cfg, NewHub())
|
||||
srv.RegisterRoutes(setupTestRouter(srv))
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/analytics/subpath-detail?hops=badnode,othernode", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("expected 404 for subpath-detail with blacklisted hop, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlacklistConcurrentIsBlacklisted(t *testing.T) {
|
||||
cfg := &Config{
|
||||
NodeBlacklist: []string{"AA", "BB", "CC"},
|
||||
}
|
||||
|
||||
errc := make(chan error, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
for j := 0; j < 100; j++ {
|
||||
cfg.IsBlacklisted("AA")
|
||||
cfg.IsBlacklisted("BB")
|
||||
cfg.IsBlacklisted("DD")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// If sync.Once is wrong, this would panic or race.
|
||||
// We can't run the race detector on ARM, but at least verify no panics.
|
||||
done := false
|
||||
for !done {
|
||||
select {
|
||||
case <-errc:
|
||||
t.Error("concurrent IsBlacklisted panicked")
|
||||
default:
|
||||
done = true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,427 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"math"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ─── Path Inspector ────────────────────────────────────────────────────────────
|
||||
// POST /api/paths/inspect — beam-search scorer for prefix path candidates.
|
||||
// Spec: issue #944 §2.1–2.5.
|
||||
|
||||
// pathInspectRequest is the JSON body for the inspect endpoint.
|
||||
type pathInspectRequest struct {
|
||||
Prefixes []string `json:"prefixes"`
|
||||
Context *pathInspectContext `json:"context,omitempty"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
type pathInspectContext struct {
|
||||
ObserverID string `json:"observerId,omitempty"`
|
||||
Since string `json:"since,omitempty"`
|
||||
Until string `json:"until,omitempty"`
|
||||
}
|
||||
|
||||
// pathCandidate is one scored candidate path in the response.
|
||||
type pathCandidate struct {
|
||||
Path []string `json:"path"`
|
||||
Names []string `json:"names"`
|
||||
Score float64 `json:"score"`
|
||||
Speculative bool `json:"speculative"`
|
||||
Evidence pathEvidence `json:"evidence"`
|
||||
}
|
||||
|
||||
type pathEvidence struct {
|
||||
PerHop []hopEvidence `json:"perHop"`
|
||||
}
|
||||
|
||||
type hopEvidence struct {
|
||||
Prefix string `json:"prefix"`
|
||||
CandidatesConsidered int `json:"candidatesConsidered"`
|
||||
Chosen string `json:"chosen"`
|
||||
EdgeWeight float64 `json:"edgeWeight"`
|
||||
Alternatives []hopAlternative `json:"alternatives,omitempty"`
|
||||
}
|
||||
|
||||
// hopAlternative shows a candidate that was considered but not chosen for this hop.
|
||||
type hopAlternative struct {
|
||||
PublicKey string `json:"publicKey"`
|
||||
Name string `json:"name"`
|
||||
Score float64 `json:"score"`
|
||||
}
|
||||
|
||||
type pathInspectResponse struct {
|
||||
Candidates []pathCandidate `json:"candidates"`
|
||||
Input map[string]interface{} `json:"input"`
|
||||
Stats map[string]interface{} `json:"stats"`
|
||||
}
|
||||
|
||||
// beamEntry represents a partial path being extended during beam search.
|
||||
type beamEntry struct {
|
||||
pubkeys []string
|
||||
names []string
|
||||
evidence []hopEvidence
|
||||
score float64 // product of per-hop scores (pre-geometric-mean)
|
||||
}
|
||||
|
||||
const (
|
||||
beamWidth = 20
|
||||
maxInputHops = 64
|
||||
maxPrefixBytes = 3
|
||||
maxRequestItems = 64
|
||||
geoMaxKm = 50.0
|
||||
hopScoreFloor = 0.05
|
||||
speculativeThreshold = 0.7
|
||||
inspectCacheTTL = 30 * time.Second
|
||||
inspectBodyLimit = 4096
|
||||
)
|
||||
|
||||
// Weights per spec §2.3.
|
||||
const (
|
||||
wEdge = 0.35
|
||||
wGeo = 0.20
|
||||
wRecency = 0.15
|
||||
wSelectivity = 0.30
|
||||
)
|
||||
|
||||
func (s *Server) handlePathInspect(w http.ResponseWriter, r *http.Request) {
|
||||
// Body limit per spec §2.1.
|
||||
r.Body = http.MaxBytesReader(w, r.Body, inspectBodyLimit)
|
||||
|
||||
var req pathInspectRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, `{"error":"invalid JSON"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate prefixes.
|
||||
if len(req.Prefixes) == 0 {
|
||||
http.Error(w, `{"error":"prefixes required"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(req.Prefixes) > maxRequestItems {
|
||||
http.Error(w, `{"error":"too many prefixes (max 64)"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Normalize + validate each prefix.
|
||||
prefixByteLen := -1
|
||||
for i, p := range req.Prefixes {
|
||||
p = strings.ToLower(strings.TrimSpace(p))
|
||||
req.Prefixes[i] = p
|
||||
if len(p) == 0 || len(p)%2 != 0 {
|
||||
http.Error(w, `{"error":"prefixes must be even-length hex"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if _, err := hex.DecodeString(p); err != nil {
|
||||
http.Error(w, `{"error":"prefixes must be valid hex"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
byteLen := len(p) / 2
|
||||
if byteLen > maxPrefixBytes {
|
||||
http.Error(w, `{"error":"prefix exceeds 3 bytes"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if prefixByteLen == -1 {
|
||||
prefixByteLen = byteLen
|
||||
} else if byteLen != prefixByteLen {
|
||||
http.Error(w, `{"error":"mixed prefix lengths not allowed"}`, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
limit := req.Limit
|
||||
if limit <= 0 {
|
||||
limit = 10
|
||||
}
|
||||
if limit > 50 {
|
||||
limit = 50
|
||||
}
|
||||
|
||||
// Check cache.
|
||||
cacheKey := s.store.inspectCacheKey(req)
|
||||
s.store.inspectMu.RLock()
|
||||
if cached, ok := s.store.inspectCache[cacheKey]; ok && time.Now().Before(cached.expiresAt) {
|
||||
s.store.inspectMu.RUnlock()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(cached.data)
|
||||
return
|
||||
}
|
||||
s.store.inspectMu.RUnlock()
|
||||
|
||||
// Snapshot data under read lock.
|
||||
nodes, pm := s.store.getCachedNodesAndPM()
|
||||
|
||||
// Build pubkey→nodeInfo map for O(1) geo lookup in scorer.
|
||||
nodeByPK := make(map[string]*nodeInfo, len(nodes))
|
||||
for i := range nodes {
|
||||
nodeByPK[strings.ToLower(nodes[i].PublicKey)] = &nodes[i]
|
||||
}
|
||||
|
||||
// Get neighbor graph; handle cold start.
|
||||
graph := s.store.graph
|
||||
if graph == nil || graph.IsStale() {
|
||||
rebuilt := make(chan struct{})
|
||||
go func() {
|
||||
s.store.ensureNeighborGraph()
|
||||
close(rebuilt)
|
||||
}()
|
||||
select {
|
||||
case <-rebuilt:
|
||||
graph = s.store.graph
|
||||
case <-time.After(2 * time.Second):
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{"retry": true})
|
||||
return
|
||||
}
|
||||
if graph == nil {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{"retry": true})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
start := now
|
||||
|
||||
// Beam search.
|
||||
beam := s.store.beamSearch(req.Prefixes, pm, graph, nodeByPK, now)
|
||||
|
||||
// Sort by score descending, take top limit.
|
||||
sortBeam(beam)
|
||||
if len(beam) > limit {
|
||||
beam = beam[:limit]
|
||||
}
|
||||
|
||||
// Build response with per-hop alternatives (spec §2.7, M2 fix).
|
||||
candidates := make([]pathCandidate, 0, len(beam))
|
||||
for _, entry := range beam {
|
||||
nHops := len(entry.pubkeys)
|
||||
var score float64
|
||||
if nHops > 0 {
|
||||
score = math.Pow(entry.score, 1.0/float64(nHops))
|
||||
}
|
||||
|
||||
// Populate per-hop alternatives: other candidates at each hop that weren't chosen.
|
||||
evidence := make([]hopEvidence, len(entry.evidence))
|
||||
copy(evidence, entry.evidence)
|
||||
for hi, ev := range evidence {
|
||||
if hi >= len(req.Prefixes) {
|
||||
break
|
||||
}
|
||||
prefix := req.Prefixes[hi]
|
||||
allCands := pm.m[prefix]
|
||||
var alts []hopAlternative
|
||||
for _, c := range allCands {
|
||||
if !canAppearInPath(c.Role) || c.PublicKey == ev.Chosen {
|
||||
continue
|
||||
}
|
||||
// Score this alternative in context of the partial path up to this hop.
|
||||
var partialEntry beamEntry
|
||||
if hi > 0 {
|
||||
partialEntry = beamEntry{pubkeys: entry.pubkeys[:hi], names: entry.names[:hi], score: 1.0}
|
||||
}
|
||||
altScore := s.store.scoreHop(partialEntry, c, ev.CandidatesConsidered, graph, nodeByPK, now, hi)
|
||||
alts = append(alts, hopAlternative{PublicKey: c.PublicKey, Name: c.Name, Score: math.Round(altScore*1000) / 1000})
|
||||
}
|
||||
// Sort alts by score desc, cap at 5.
|
||||
sort.Slice(alts, func(i, j int) bool { return alts[i].Score > alts[j].Score })
|
||||
if len(alts) > 5 {
|
||||
alts = alts[:5]
|
||||
}
|
||||
evidence[hi] = hopEvidence{
|
||||
Prefix: ev.Prefix,
|
||||
CandidatesConsidered: ev.CandidatesConsidered,
|
||||
Chosen: ev.Chosen,
|
||||
EdgeWeight: ev.EdgeWeight,
|
||||
Alternatives: alts,
|
||||
}
|
||||
}
|
||||
|
||||
candidates = append(candidates, pathCandidate{
|
||||
Path: entry.pubkeys,
|
||||
Names: entry.names,
|
||||
Score: math.Round(score*1000) / 1000,
|
||||
Speculative: score < speculativeThreshold,
|
||||
Evidence: pathEvidence{PerHop: evidence},
|
||||
})
|
||||
}
|
||||
|
||||
elapsed := time.Since(start).Milliseconds()
|
||||
resp := pathInspectResponse{
|
||||
Candidates: candidates,
|
||||
Input: map[string]interface{}{
|
||||
"prefixes": req.Prefixes,
|
||||
"hops": len(req.Prefixes),
|
||||
},
|
||||
Stats: map[string]interface{}{
|
||||
"beamWidth": beamWidth,
|
||||
"expansionsRun": len(req.Prefixes) * beamWidth,
|
||||
"elapsedMs": elapsed,
|
||||
},
|
||||
}
|
||||
|
||||
// Cache result (and evict stale entries).
|
||||
s.store.inspectMu.Lock()
|
||||
if s.store.inspectCache == nil {
|
||||
s.store.inspectCache = make(map[string]*inspectCachedResult)
|
||||
}
|
||||
now2 := time.Now()
|
||||
for k, v := range s.store.inspectCache {
|
||||
if now2.After(v.expiresAt) {
|
||||
delete(s.store.inspectCache, k)
|
||||
}
|
||||
}
|
||||
s.store.inspectCache[cacheKey] = &inspectCachedResult{
|
||||
data: resp,
|
||||
expiresAt: now2.Add(inspectCacheTTL),
|
||||
}
|
||||
s.store.inspectMu.Unlock()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
}
|
||||
|
||||
type inspectCachedResult struct {
|
||||
data pathInspectResponse
|
||||
expiresAt time.Time
|
||||
}
|
||||
|
||||
func (s *PacketStore) inspectCacheKey(req pathInspectRequest) string {
|
||||
key := strings.Join(req.Prefixes, ",")
|
||||
if req.Context != nil {
|
||||
key += "|" + req.Context.ObserverID + "|" + req.Context.Since + "|" + req.Context.Until
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func (s *PacketStore) beamSearch(prefixes []string, pm *prefixMap, graph *NeighborGraph, nodeByPK map[string]*nodeInfo, now time.Time) []beamEntry {
|
||||
// Start with empty beam.
|
||||
beam := []beamEntry{{pubkeys: nil, names: nil, evidence: nil, score: 1.0}}
|
||||
|
||||
for hopIdx, prefix := range prefixes {
|
||||
candidates := pm.m[prefix]
|
||||
// Filter by role at lookup time (spec §2.2 step 2).
|
||||
var filtered []nodeInfo
|
||||
for _, c := range candidates {
|
||||
if canAppearInPath(c.Role) {
|
||||
filtered = append(filtered, c)
|
||||
}
|
||||
}
|
||||
|
||||
candidateCount := len(filtered)
|
||||
if candidateCount == 0 {
|
||||
// No candidates for this hop — beam dies.
|
||||
return nil
|
||||
}
|
||||
|
||||
var nextBeam []beamEntry
|
||||
for _, entry := range beam {
|
||||
for _, cand := range filtered {
|
||||
hopScore := s.scoreHop(entry, cand, candidateCount, graph, nodeByPK, now, hopIdx)
|
||||
if hopScore < hopScoreFloor {
|
||||
hopScore = hopScoreFloor
|
||||
}
|
||||
|
||||
newEntry := beamEntry{
|
||||
pubkeys: append(append([]string{}, entry.pubkeys...), cand.PublicKey),
|
||||
names: append(append([]string{}, entry.names...), cand.Name),
|
||||
evidence: append(append([]hopEvidence{}, entry.evidence...), hopEvidence{
|
||||
Prefix: prefix,
|
||||
CandidatesConsidered: candidateCount,
|
||||
Chosen: cand.PublicKey,
|
||||
EdgeWeight: hopScore,
|
||||
}),
|
||||
score: entry.score * hopScore,
|
||||
}
|
||||
nextBeam = append(nextBeam, newEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune to beam width.
|
||||
sortBeam(nextBeam)
|
||||
if len(nextBeam) > beamWidth {
|
||||
nextBeam = nextBeam[:beamWidth]
|
||||
}
|
||||
beam = nextBeam
|
||||
}
|
||||
|
||||
return beam
|
||||
}
|
||||
|
||||
func (s *PacketStore) scoreHop(entry beamEntry, cand nodeInfo, candidateCount int, graph *NeighborGraph, nodeByPK map[string]*nodeInfo, now time.Time, hopIdx int) float64 {
|
||||
var edgeScore float64
|
||||
var geoScore float64 = 1.0
|
||||
var recencyScore float64 = 1.0
|
||||
|
||||
if hopIdx == 0 || len(entry.pubkeys) == 0 {
|
||||
// First hop: no prior node to compare against.
|
||||
edgeScore = 1.0
|
||||
} else {
|
||||
lastPK := entry.pubkeys[len(entry.pubkeys)-1]
|
||||
|
||||
// Single scan over neighbors for both edge weight and recency.
|
||||
edges := graph.Neighbors(lastPK)
|
||||
var foundEdge *NeighborEdge
|
||||
for _, e := range edges {
|
||||
peer := e.NodeA
|
||||
if strings.EqualFold(peer, lastPK) {
|
||||
peer = e.NodeB
|
||||
}
|
||||
if strings.EqualFold(peer, cand.PublicKey) {
|
||||
foundEdge = e
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if foundEdge != nil {
|
||||
edgeScore = foundEdge.Score(now)
|
||||
hoursSince := now.Sub(foundEdge.LastSeen).Hours()
|
||||
if hoursSince <= 24 {
|
||||
recencyScore = 1.0
|
||||
} else {
|
||||
recencyScore = math.Max(0.1, 24.0/hoursSince)
|
||||
}
|
||||
} else {
|
||||
edgeScore = 0
|
||||
recencyScore = 0
|
||||
}
|
||||
|
||||
// Geographic plausibility.
|
||||
prevNode := nodeByPK[strings.ToLower(lastPK)]
|
||||
if prevNode != nil && prevNode.HasGPS && cand.HasGPS {
|
||||
dist := haversineKm(prevNode.Lat, prevNode.Lon, cand.Lat, cand.Lon)
|
||||
if dist > geoMaxKm {
|
||||
geoScore = math.Max(0.1, geoMaxKm/dist)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prefix selectivity.
|
||||
selectivityScore := 1.0 / float64(candidateCount)
|
||||
|
||||
return wEdge*edgeScore + wGeo*geoScore + wRecency*recencyScore + wSelectivity*selectivityScore
|
||||
}
|
||||
|
||||
|
||||
func sortBeam(beam []beamEntry) {
|
||||
sort.Slice(beam, func(i, j int) bool {
|
||||
return beam[i].score > beam[j].score
|
||||
})
|
||||
}
|
||||
|
||||
// ensureNeighborGraph triggers a graph rebuild if nil or stale.
|
||||
func (s *PacketStore) ensureNeighborGraph() {
|
||||
if s.graph != nil && !s.graph.IsStale() {
|
||||
return
|
||||
}
|
||||
g := BuildFromStore(s)
|
||||
s.graph = g
|
||||
}
|
||||
@@ -1,308 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ─── Unit tests for path inspector (issue #944) ────────────────────────────────
|
||||
|
||||
func TestScoreHop_EdgeWeight(t *testing.T) {
|
||||
store := &PacketStore{}
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
// Add an edge between A and B.
|
||||
graph.mu.Lock()
|
||||
edge := &NeighborEdge{
|
||||
NodeA: "aaaa", NodeB: "bbbb",
|
||||
Count: 50, LastSeen: now.Add(-1 * time.Hour),
|
||||
Observers: map[string]bool{"obs1": true},
|
||||
}
|
||||
key := edgeKey{"aaaa", "bbbb"}
|
||||
graph.edges[key] = edge
|
||||
graph.byNode["aaaa"] = append(graph.byNode["aaaa"], edge)
|
||||
graph.byNode["bbbb"] = append(graph.byNode["bbbb"], edge)
|
||||
graph.mu.Unlock()
|
||||
|
||||
entry := beamEntry{pubkeys: []string{"aaaa"}, names: []string{"NodeA"}}
|
||||
cand := nodeInfo{PublicKey: "bbbb", Name: "NodeB", Role: "repeater"}
|
||||
|
||||
score := store.scoreHop(entry, cand, 2, graph, nil, now, 1)
|
||||
|
||||
// With edge present, edgeScore > 0. With 2 candidates, selectivity = 0.5.
|
||||
// Anti-tautology: if we zero out edge weight constant, score would change.
|
||||
if score <= 0.05 {
|
||||
t.Errorf("expected score > floor, got %f", score)
|
||||
}
|
||||
|
||||
// No edge: score should be lower.
|
||||
candNoEdge := nodeInfo{PublicKey: "cccc", Name: "NodeC", Role: "repeater"}
|
||||
scoreNoEdge := store.scoreHop(entry, candNoEdge, 2, graph, nil, now, 1)
|
||||
if scoreNoEdge >= score {
|
||||
t.Errorf("expected no-edge score (%f) < edge score (%f)", scoreNoEdge, score)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScoreHop_FirstHop(t *testing.T) {
|
||||
store := &PacketStore{}
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
entry := beamEntry{pubkeys: nil, names: nil}
|
||||
cand := nodeInfo{PublicKey: "aaaa", Name: "NodeA", Role: "repeater"}
|
||||
|
||||
score := store.scoreHop(entry, cand, 3, graph, nil, now, 0)
|
||||
// First hop: edgeScore=1.0, geoScore=1.0, recencyScore=1.0, selectivity=1/3
|
||||
// = 0.35*1 + 0.20*1 + 0.15*1 + 0.30*(1/3) = 0.35+0.20+0.15+0.10 = 0.80
|
||||
expected := 0.35 + 0.20 + 0.15 + 0.30/3.0
|
||||
if score < expected-0.01 || score > expected+0.01 {
|
||||
t.Errorf("expected ~%f, got %f", expected, score)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScoreHop_GeoPlausibility(t *testing.T) {
|
||||
store := &PacketStore{}
|
||||
store.nodeCache = []nodeInfo{
|
||||
{PublicKey: "aaaa", Name: "A", Role: "repeater", Lat: 37.0, Lon: -122.0, HasGPS: true},
|
||||
{PublicKey: "bbbb", Name: "B", Role: "repeater", Lat: 37.01, Lon: -122.01, HasGPS: true}, // ~1.4km
|
||||
{PublicKey: "cccc", Name: "C", Role: "repeater", Lat: 40.0, Lon: -120.0, HasGPS: true}, // ~400km
|
||||
}
|
||||
store.nodePM = buildPrefixMap(store.nodeCache)
|
||||
store.nodeCacheTime = time.Now()
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
now := time.Now()
|
||||
|
||||
nodeByPK := map[string]*nodeInfo{
|
||||
"aaaa": &store.nodeCache[0],
|
||||
"bbbb": &store.nodeCache[1],
|
||||
"cccc": &store.nodeCache[2],
|
||||
}
|
||||
|
||||
entry := beamEntry{pubkeys: []string{"aaaa"}, names: []string{"A"}}
|
||||
|
||||
// Close node should score higher than far node (geo component).
|
||||
scoreClose := store.scoreHop(entry, store.nodeCache[1], 2, graph, nodeByPK, now, 1)
|
||||
scoreFar := store.scoreHop(entry, store.nodeCache[2], 2, graph, nodeByPK, now, 1)
|
||||
if scoreFar >= scoreClose {
|
||||
t.Errorf("expected far node score (%f) < close node score (%f)", scoreFar, scoreClose)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeamSearch_WidthCap(t *testing.T) {
|
||||
store := &PacketStore{}
|
||||
graph := NewNeighborGraph()
|
||||
graph.builtAt = time.Now()
|
||||
now := time.Now()
|
||||
|
||||
// Create 25 nodes that all match prefix "aa".
|
||||
var nodes []nodeInfo
|
||||
for i := 0; i < 25; i++ {
|
||||
// Each node has pubkey starting with "aa" followed by unique hex.
|
||||
pk := "aa" + strings.Repeat("0", 4) + fmt.Sprintf("%02x", i)
|
||||
nodes = append(nodes, nodeInfo{PublicKey: pk, Name: pk, Role: "repeater"})
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
// Two hops of "aa" — should produce 25*25=625 combos, pruned to 20.
|
||||
beam := store.beamSearch([]string{"aa", "aa"}, pm, graph, nil, now)
|
||||
if len(beam) > beamWidth {
|
||||
t.Errorf("beam exceeded width: got %d, want <= %d", len(beam), beamWidth)
|
||||
}
|
||||
// Anti-tautology: without beam pruning, we'd have up to 25*min(25,beamWidth)=500 entries.
|
||||
// The test verifies pruning is effective.
|
||||
}
|
||||
|
||||
func TestBeamSearch_Speculative(t *testing.T) {
|
||||
store := &PacketStore{}
|
||||
graph := NewNeighborGraph()
|
||||
graph.builtAt = time.Now()
|
||||
now := time.Now()
|
||||
|
||||
// Create nodes with no edges and multiple candidates — should result in low scores (speculative).
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "aabb", Name: "N1", Role: "repeater"},
|
||||
{PublicKey: "aabb22", Name: "N1b", Role: "repeater"},
|
||||
{PublicKey: "ccdd", Name: "N2", Role: "repeater"},
|
||||
{PublicKey: "ccdd22", Name: "N2b", Role: "repeater"},
|
||||
{PublicKey: "ccdd33", Name: "N2c", Role: "repeater"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
beam := store.beamSearch([]string{"aa", "cc"}, pm, graph, nil, now)
|
||||
if len(beam) == 0 {
|
||||
t.Fatal("expected at least one result")
|
||||
}
|
||||
|
||||
// Score should be < 0.7 since there's no edge and multiple candidates (speculative).
|
||||
nHops := len(beam[0].pubkeys)
|
||||
score := 1.0
|
||||
if nHops > 0 {
|
||||
product := beam[0].score
|
||||
score = pow(product, 1.0/float64(nHops))
|
||||
}
|
||||
if score >= speculativeThreshold {
|
||||
t.Errorf("expected speculative score (< %f), got %f", speculativeThreshold, score)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_EmptyPrefixes(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
body := `{"prefixes":[]}`
|
||||
rr := doInspectRequest(srv, body)
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("expected 400, got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_OddLengthPrefix(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
body := `{"prefixes":["abc"]}`
|
||||
rr := doInspectRequest(srv, body)
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("expected 400 for odd-length prefix, got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_MixedLengths(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
body := `{"prefixes":["aa","bbcc"]}`
|
||||
rr := doInspectRequest(srv, body)
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("expected 400 for mixed lengths, got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_TooLongPrefix(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
body := `{"prefixes":["aabbccdd"]}`
|
||||
rr := doInspectRequest(srv, body)
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("expected 400 for >3-byte prefix, got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_TooManyPrefixes(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
prefixes := make([]string, 65)
|
||||
for i := range prefixes {
|
||||
prefixes[i] = "aa"
|
||||
}
|
||||
b, _ := json.Marshal(map[string]interface{}{"prefixes": prefixes})
|
||||
rr := doInspectRequest(srv, string(b))
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("expected 400 for >64 prefixes, got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePathInspect_ValidRequest(t *testing.T) {
|
||||
srv := newTestServerForInspect(t)
|
||||
|
||||
// Seed nodes in the store — multiple candidates per prefix to lower selectivity.
|
||||
srv.store.nodeCache = []nodeInfo{
|
||||
{PublicKey: "aabb1234", Name: "NodeA", Role: "repeater", Lat: 37.0, Lon: -122.0, HasGPS: true},
|
||||
{PublicKey: "aabb5678", Name: "NodeA2", Role: "repeater"},
|
||||
{PublicKey: "ccdd5678", Name: "NodeB", Role: "repeater", Lat: 37.01, Lon: -122.01, HasGPS: true},
|
||||
{PublicKey: "ccdd9999", Name: "NodeB2", Role: "repeater"},
|
||||
{PublicKey: "ccdd1111", Name: "NodeB3", Role: "repeater"},
|
||||
}
|
||||
srv.store.nodePM = buildPrefixMap(srv.store.nodeCache)
|
||||
srv.store.nodeCacheTime = time.Now()
|
||||
srv.store.graph = NewNeighborGraph()
|
||||
srv.store.graph.builtAt = time.Now()
|
||||
|
||||
body := `{"prefixes":["aa","cc"]}`
|
||||
rr := doInspectRequest(srv, body)
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String())
|
||||
}
|
||||
|
||||
var resp pathInspectResponse
|
||||
if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("invalid JSON response: %v", err)
|
||||
}
|
||||
if len(resp.Candidates) == 0 {
|
||||
t.Error("expected at least one candidate")
|
||||
}
|
||||
if resp.Candidates[0].Speculative != true {
|
||||
// No edge between nodes, so score should be < 0.7.
|
||||
t.Error("expected speculative=true for no-edge path")
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
func newTestServerForInspect(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
store := &PacketStore{
|
||||
inspectCache: make(map[string]*inspectCachedResult),
|
||||
}
|
||||
store.graph = NewNeighborGraph()
|
||||
store.graph.builtAt = time.Now()
|
||||
return &Server{store: store}
|
||||
}
|
||||
|
||||
func doInspectRequest(srv *Server, body string) *httptest.ResponseRecorder {
|
||||
req := httptest.NewRequest("POST", "/api/paths/inspect", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
rr := httptest.NewRecorder()
|
||||
srv.handlePathInspect(rr, req)
|
||||
return rr
|
||||
}
|
||||
|
||||
func pow(base, exp float64) float64 {
|
||||
return math.Pow(base, exp)
|
||||
}
|
||||
|
||||
// BenchmarkBeamSearch — performance proof for spec §2.5 (<100ms p99 for ≤64 hops).
|
||||
// Anti-tautology: removing beam pruning makes this ~625x slower; timing assertion catches it.
|
||||
func BenchmarkBeamSearch(b *testing.B) {
|
||||
// Setup: 100 nodes, 10-hop prefix input, realistic neighbor graph.
|
||||
// Anti-tautology: removing beam pruning makes this ~625x slower.
|
||||
store := &PacketStore{}
|
||||
pm := &prefixMap{m: make(map[string][]nodeInfo)}
|
||||
graph := NewNeighborGraph()
|
||||
nodes := make([]nodeInfo, 100)
|
||||
|
||||
now := time.Now()
|
||||
for i := 0; i < 100; i++ {
|
||||
pk := fmt.Sprintf("%064x", i)
|
||||
prefix := fmt.Sprintf("%02x", i%256)
|
||||
node := nodeInfo{PublicKey: pk, Name: fmt.Sprintf("Node%d", i), Role: "repeater", Lat: 37.0 + float64(i)*0.01, Lon: -122.0 + float64(i)*0.01}
|
||||
nodes[i] = node
|
||||
pm.m[prefix] = append(pm.m[prefix], node)
|
||||
// Add neighbor edges to create a connected graph.
|
||||
if i > 0 {
|
||||
prevPK := fmt.Sprintf("%064x", i-1)
|
||||
key := makeEdgeKey(prevPK, pk)
|
||||
edge := &NeighborEdge{NodeA: prevPK, NodeB: pk, LastSeen: now, Count: 10}
|
||||
graph.edges[key] = edge
|
||||
graph.byNode[prevPK] = append(graph.byNode[prevPK], edge)
|
||||
graph.byNode[pk] = append(graph.byNode[pk], edge)
|
||||
}
|
||||
}
|
||||
|
||||
// 10-hop input using prefixes that map to multiple candidates.
|
||||
prefixes := make([]string, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
prefixes[i] = fmt.Sprintf("%02x", (i*3)%256)
|
||||
}
|
||||
|
||||
nodeByPK := make(map[string]*nodeInfo)
|
||||
for idx := range nodes {
|
||||
nodeByPK[nodes[idx].PublicKey] = &nodes[idx]
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
store.beamSearch(prefixes, pm, graph, nodeByPK, now)
|
||||
}
|
||||
}
|
||||
@@ -1,212 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCanAppearInPath(t *testing.T) {
|
||||
cases := []struct {
|
||||
role string
|
||||
want bool
|
||||
}{
|
||||
{"repeater", true},
|
||||
{"Repeater", true},
|
||||
{"REPEATER", true},
|
||||
{"room_server", true},
|
||||
{"Room_Server", true},
|
||||
{"room", true},
|
||||
{"companion", false},
|
||||
{"sensor", false},
|
||||
{"", false},
|
||||
{"unknown", false},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
if got := canAppearInPath(tc.role); got != tc.want {
|
||||
t.Errorf("canAppearInPath(%q) = %v, want %v", tc.role, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPrefixMap_ExcludesCompanions(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
if len(pm.m) != 0 {
|
||||
t.Fatalf("expected empty prefix map, got %d entries", len(pm.m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPrefixMap_ExcludesSensors(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
if len(pm.m) != 0 {
|
||||
t.Fatalf("expected empty prefix map, got %d entries", len(pm.m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWithContext_NilWhenOnlyCompanionMatchesPrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r, _, _ := pm.resolveWithContext("7a", nil, nil)
|
||||
if r != nil {
|
||||
t.Fatalf("expected nil, got %+v", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWithContext_NilWhenOnlySensorMatchesPrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r, _, _ := pm.resolveWithContext("7a", nil, nil)
|
||||
if r != nil {
|
||||
t.Fatalf("expected nil for sensor-only prefix, got %+v", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWithContext_PrefersRepeaterOverCompanionAtSamePrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
|
||||
{PublicKey: "7a5678901234", Role: "repeater", Name: "MyRepeater"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r, _, _ := pm.resolveWithContext("7a", nil, nil)
|
||||
if r == nil {
|
||||
t.Fatal("expected non-nil result")
|
||||
}
|
||||
if r.Name != "MyRepeater" {
|
||||
t.Fatalf("expected MyRepeater, got %s", r.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWithContext_PrefersRoomServerOverCompanionAtSamePrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "ab1234abcdef", Role: "companion", Name: "MyCompanion"},
|
||||
{PublicKey: "ab5678901234", Role: "room_server", Name: "MyRoom"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r, _, _ := pm.resolveWithContext("ab", nil, nil)
|
||||
if r == nil {
|
||||
t.Fatal("expected non-nil result")
|
||||
}
|
||||
if r.Name != "MyRoom" {
|
||||
t.Fatalf("expected MyRoom, got %s", r.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolve_NilWhenOnlyCompanionMatchesPrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "MyCompanion"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r := pm.resolve("7a")
|
||||
if r != nil {
|
||||
t.Fatalf("expected nil from resolve() for companion-only prefix, got %+v", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolve_NilWhenOnlySensorMatchesPrefix(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "sensor", Name: "MySensor"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r := pm.resolve("7a")
|
||||
if r != nil {
|
||||
t.Fatalf("expected nil from resolve() for sensor-only prefix, got %+v", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWithContext_PicksRepeaterEvenWhenCompanionHasGPS(t *testing.T) {
|
||||
// Adversarial: companion has GPS, repeater doesn't. Role filter should
|
||||
// exclude companion entirely, so repeater wins despite lacking GPS.
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "GPSCompanion", Lat: 37.0, Lon: -122.0, HasGPS: true},
|
||||
{PublicKey: "7a5678901234", Role: "repeater", Name: "NoGPSRepeater", Lat: 0, Lon: 0, HasGPS: false},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
r, _, _ := pm.resolveWithContext("7a", nil, nil)
|
||||
if r == nil {
|
||||
t.Fatal("expected non-nil result")
|
||||
}
|
||||
if r.Name != "NoGPSRepeater" {
|
||||
t.Fatalf("expected NoGPSRepeater (role filter excludes companion), got %s", r.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeDistancesForTx_CompanionNeverInResolvedChain(t *testing.T) {
|
||||
// Integration test: a path with a prefix matching both a companion and a
|
||||
// repeater. The resolveHop function (using buildPrefixMap) should only
|
||||
// return the repeater.
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "7a1234abcdef", Role: "companion", Name: "BadCompanion", Lat: 37.0, Lon: -122.0, HasGPS: true},
|
||||
{PublicKey: "7a5678901234", Role: "repeater", Name: "GoodRepeater", Lat: 38.0, Lon: -123.0, HasGPS: true},
|
||||
{PublicKey: "bb1111111111", Role: "repeater", Name: "OtherRepeater", Lat: 39.0, Lon: -124.0, HasGPS: true},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
nodeByPk := make(map[string]*nodeInfo)
|
||||
for i := range nodes {
|
||||
nodeByPk[nodes[i].PublicKey] = &nodes[i]
|
||||
}
|
||||
repeaterSet := map[string]bool{
|
||||
"7a5678901234": true,
|
||||
"bb1111111111": true,
|
||||
}
|
||||
|
||||
// Build a synthetic StoreTx with a path ["7a", "bb"] and a sender with GPS
|
||||
senderPK := "cc0000000000"
|
||||
sender := nodeInfo{PublicKey: senderPK, Role: "repeater", Name: "Sender", Lat: 36.0, Lon: -121.0, HasGPS: true}
|
||||
nodeByPk[senderPK] = &sender
|
||||
|
||||
pathJSON, _ := json.Marshal([]string{"7a", "bb"})
|
||||
decoded, _ := json.Marshal(map[string]interface{}{"pubKey": senderPK})
|
||||
|
||||
tx := &StoreTx{
|
||||
PathJSON: string(pathJSON),
|
||||
DecodedJSON: string(decoded),
|
||||
FirstSeen: "2026-04-30T12:00",
|
||||
}
|
||||
|
||||
resolveHop := func(hop string) *nodeInfo {
|
||||
return pm.resolve(hop)
|
||||
}
|
||||
|
||||
hops, pathRec := computeDistancesForTx(tx, nodeByPk, repeaterSet, resolveHop)
|
||||
|
||||
// Verify BadCompanion's pubkey never appears in hops
|
||||
badPK := "7a1234abcdef"
|
||||
for i, h := range hops {
|
||||
if h.FromPk == badPK || h.ToPk == badPK {
|
||||
t.Fatalf("hop[%d] contains BadCompanion pubkey: from=%s to=%s", i, h.FromPk, h.ToPk)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify BadCompanion's pubkey never appears in pathRec
|
||||
if pathRec == nil {
|
||||
t.Fatal("expected non-nil path record (3 GPS nodes in chain)")
|
||||
}
|
||||
for i, hop := range pathRec.Hops {
|
||||
if hop.FromPk == badPK || hop.ToPk == badPK {
|
||||
t.Fatalf("pathRec.Hops[%d] contains BadCompanion pubkey: from=%s to=%s", i, hop.FromPk, hop.ToPk)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify GoodRepeater IS in the chain (proves the prefix was resolved to the right node)
|
||||
goodPK := "7a5678901234"
|
||||
foundGood := false
|
||||
for _, hop := range pathRec.Hops {
|
||||
if hop.FromPk == goodPK || hop.ToPk == goodPK {
|
||||
foundGood = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundGood {
|
||||
t.Fatal("expected GoodRepeater (7a5678901234) in pathRec.Hops but not found")
|
||||
}
|
||||
}
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
func TestResolveWithContext_UniquePrefix(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1b2c3d4", Name: "Node-A", HasGPS: true, Lat: 1, Lon: 2},
|
||||
{PublicKey: "a1b2c3d4", Name: "Node-A", HasGPS: true, Lat: 1, Lon: 2},
|
||||
})
|
||||
ni, confidence, _ := pm.resolveWithContext("a1b2c3d4", nil, nil)
|
||||
if ni == nil || ni.Name != "Node-A" {
|
||||
@@ -24,7 +24,7 @@ func TestResolveWithContext_UniquePrefix(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_NoMatch(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1b2c3d4", Name: "Node-A"},
|
||||
{PublicKey: "a1b2c3d4", Name: "Node-A"},
|
||||
})
|
||||
ni, confidence, _ := pm.resolveWithContext("ff", nil, nil)
|
||||
if ni != nil {
|
||||
@@ -37,8 +37,8 @@ func TestResolveWithContext_NoMatch(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_AffinityWins(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "Node-A1"},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Node-A2"},
|
||||
{PublicKey: "a1aaaaaa", Name: "Node-A1"},
|
||||
{PublicKey: "a1bbbbbb", Name: "Node-A2"},
|
||||
})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
@@ -60,9 +60,9 @@ func TestResolveWithContext_AffinityWins(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_AffinityTooClose_FallsToGeo(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "Node-A1", HasGPS: true, Lat: 10, Lon: 20},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Node-A2", HasGPS: true, Lat: 11, Lon: 21},
|
||||
{Role: "repeater", PublicKey: "c0c0c0c0", Name: "Ctx", HasGPS: true, Lat: 10.1, Lon: 20.1},
|
||||
{PublicKey: "a1aaaaaa", Name: "Node-A1", HasGPS: true, Lat: 10, Lon: 20},
|
||||
{PublicKey: "a1bbbbbb", Name: "Node-A2", HasGPS: true, Lat: 11, Lon: 21},
|
||||
{PublicKey: "c0c0c0c0", Name: "Ctx", HasGPS: true, Lat: 10.1, Lon: 20.1},
|
||||
})
|
||||
|
||||
graph := NewNeighborGraph()
|
||||
@@ -85,8 +85,8 @@ func TestResolveWithContext_AffinityTooClose_FallsToGeo(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_GPSPreference(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
})
|
||||
|
||||
ni, confidence, _ := pm.resolveWithContext("a1", nil, nil)
|
||||
@@ -100,8 +100,8 @@ func TestResolveWithContext_GPSPreference(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_FirstMatchFallback(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "First"},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "Second"},
|
||||
{PublicKey: "a1aaaaaa", Name: "First"},
|
||||
{PublicKey: "a1bbbbbb", Name: "Second"},
|
||||
})
|
||||
|
||||
ni, confidence, _ := pm.resolveWithContext("a1", nil, nil)
|
||||
@@ -115,8 +115,8 @@ func TestResolveWithContext_FirstMatchFallback(t *testing.T) {
|
||||
|
||||
func TestResolveWithContext_NilGraphFallsToGPS(t *testing.T) {
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
})
|
||||
|
||||
ni, confidence, _ := pm.resolveWithContext("a1", []string{"someone"}, nil)
|
||||
@@ -131,8 +131,8 @@ func TestResolveWithContext_NilGraphFallsToGPS(t *testing.T) {
|
||||
func TestResolveWithContext_BackwardCompatResolve(t *testing.T) {
|
||||
// Verify original resolve() still works unchanged
|
||||
pm := buildPrefixMap([]nodeInfo{
|
||||
{Role: "repeater", PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{Role: "repeater", PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
{PublicKey: "a1aaaaaa", Name: "NoGPS"},
|
||||
{PublicKey: "a1bbbbbb", Name: "HasGPS", HasGPS: true, Lat: 1, Lon: 2},
|
||||
})
|
||||
ni := pm.resolve("a1")
|
||||
if ni == nil || ni.Name != "HasGPS" {
|
||||
@@ -164,8 +164,8 @@ func TestResolveHopsAPI_UniquePrefix(t *testing.T) {
|
||||
_ = srv
|
||||
|
||||
// Insert a unique node
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"ff11223344", "UniqueNode", 37.0, -122.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"ff11223344", "UniqueNode", 37.0, -122.0)
|
||||
srv.store.InvalidateNodeCache()
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ff11223344", nil)
|
||||
@@ -189,10 +189,10 @@ func TestResolveHopsAPI_UniquePrefix(t *testing.T) {
|
||||
func TestResolveHopsAPI_AmbiguousNoContext(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"ee1aaaaaaa", "Node-E1", 37.0, -122.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"ee1bbbbbbb", "Node-E2", 38.0, -121.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"ee1aaaaaaa", "Node-E1", 37.0, -122.0)
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"ee1bbbbbbb", "Node-E2", 38.0, -121.0)
|
||||
srv.store.InvalidateNodeCache()
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ee1", nil)
|
||||
@@ -224,12 +224,12 @@ func TestResolveHopsAPI_AmbiguousNoContext(t *testing.T) {
|
||||
func TestResolveHopsAPI_WithAffinityContext(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"dd1aaaaaaa", "Node-D1", 37.0, -122.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"dd1bbbbbbb", "Node-D2", 38.0, -121.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"c0c0c0c0c0", "Context", 37.1, -122.1, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"dd1aaaaaaa", "Node-D1", 37.0, -122.0)
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"dd1bbbbbbb", "Node-D2", 38.0, -121.0)
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"c0c0c0c0c0", "Context", 37.1, -122.1)
|
||||
|
||||
// Invalidate node cache so the PM includes newly inserted nodes.
|
||||
srv.store.cacheMu.Lock()
|
||||
@@ -279,8 +279,8 @@ func TestResolveHopsAPI_WithAffinityContext(t *testing.T) {
|
||||
func TestResolveHopsAPI_ResponseShape(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon, role) VALUES (?, ?, ?, ?, ?)",
|
||||
"bb1aaaaaaa", "Node-B1", 37.0, -122.0, "repeater")
|
||||
srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
|
||||
"bb1aaaaaaa", "Node-B1", 37.0, -122.0)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/resolve-hops?hops=bb1a", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
@@ -1,475 +0,0 @@
|
||||
package main
|
||||
|
||||
// Lock ordering contract (MUST be followed everywhere):
|
||||
//
|
||||
// s.mu → s.lruMu (s.mu is the outer lock, lruMu is the inner lock)
|
||||
//
|
||||
// • Never acquire s.lruMu while holding s.mu.
|
||||
// • fetchResolvedPathForObs takes lruMu independently — callers under s.mu
|
||||
// must NOT call it directly; instead collect IDs under s.mu, release, then
|
||||
// do LRU ops under lruMu separately.
|
||||
// • The backfill path (backfillResolvedPathsAsync) follows this by collecting
|
||||
// obsIDs to invalidate under s.mu, releasing it, then taking lruMu.
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"hash/fnv"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// resolvedPubkeyHash computes a fast 64-bit hash for membership index keying.
|
||||
// Uses FNV-1a from stdlib — good distribution, no external dependency.
|
||||
func resolvedPubkeyHash(pk string) uint64 {
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(strings.ToLower(pk)))
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
// addToResolvedPubkeyIndex adds a txID under each resolved pubkey hash.
|
||||
// Deduplicates both within a single call AND across calls — won't add the
|
||||
// same (hash, txID) pair twice even when called multiple times for the same tx.
|
||||
// Must be called under s.mu write lock.
|
||||
func (s *PacketStore) addToResolvedPubkeyIndex(txID int, resolvedPubkeys []string) {
|
||||
if !s.useResolvedPathIndex {
|
||||
return
|
||||
}
|
||||
seen := make(map[uint64]bool, len(resolvedPubkeys))
|
||||
for _, pk := range resolvedPubkeys {
|
||||
if pk == "" {
|
||||
continue
|
||||
}
|
||||
h := resolvedPubkeyHash(pk)
|
||||
if seen[h] {
|
||||
continue
|
||||
}
|
||||
seen[h] = true
|
||||
|
||||
// Cross-call dedup: check if (h, txID) already exists in forward index.
|
||||
existing := s.resolvedPubkeyIndex[h]
|
||||
alreadyPresent := false
|
||||
for _, id := range existing {
|
||||
if id == txID {
|
||||
alreadyPresent = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if alreadyPresent {
|
||||
continue
|
||||
}
|
||||
|
||||
s.resolvedPubkeyIndex[h] = append(existing, txID)
|
||||
s.resolvedPubkeyReverse[txID] = append(s.resolvedPubkeyReverse[txID], h)
|
||||
}
|
||||
}
|
||||
|
||||
// removeFromResolvedPubkeyIndex removes all index entries for a txID using the reverse map.
|
||||
// Must be called under s.mu write lock.
|
||||
func (s *PacketStore) removeFromResolvedPubkeyIndex(txID int) {
|
||||
if !s.useResolvedPathIndex {
|
||||
return
|
||||
}
|
||||
hashes := s.resolvedPubkeyReverse[txID]
|
||||
for _, h := range hashes {
|
||||
list := s.resolvedPubkeyIndex[h]
|
||||
// Remove ALL occurrences of txID (not just the first) to prevent orphans.
|
||||
filtered := list[:0]
|
||||
for _, id := range list {
|
||||
if id != txID {
|
||||
filtered = append(filtered, id)
|
||||
}
|
||||
}
|
||||
if len(filtered) == 0 {
|
||||
delete(s.resolvedPubkeyIndex, h)
|
||||
} else {
|
||||
s.resolvedPubkeyIndex[h] = filtered
|
||||
}
|
||||
}
|
||||
delete(s.resolvedPubkeyReverse, txID)
|
||||
}
|
||||
|
||||
// extractResolvedPubkeys extracts all non-nil, non-empty pubkeys from a resolved path.
|
||||
func extractResolvedPubkeys(rp []*string) []string {
|
||||
if len(rp) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(rp))
|
||||
for _, p := range rp {
|
||||
if p != nil && *p != "" {
|
||||
result = append(result, *p)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// mergeResolvedPubkeys collects unique non-empty pubkeys from multiple resolved paths.
|
||||
func mergeResolvedPubkeys(paths ...[]*string) []string {
|
||||
seen := make(map[string]bool)
|
||||
var result []string
|
||||
for _, rp := range paths {
|
||||
for _, p := range rp {
|
||||
if p != nil && *p != "" && !seen[*p] {
|
||||
seen[*p] = true
|
||||
result = append(result, *p)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// nodeInResolvedPathViaIndex checks whether a transmission is associated with
|
||||
// a target pubkey using the membership index + collision-safety SQL check.
|
||||
// Must be called under s.mu RLock at minimum.
|
||||
func (s *PacketStore) nodeInResolvedPathViaIndex(tx *StoreTx, targetPK string) bool {
|
||||
if !s.useResolvedPathIndex {
|
||||
// Flag off: can't disambiguate, keep candidate (conservative)
|
||||
return true
|
||||
}
|
||||
|
||||
// If this tx has no indexed pubkeys at all, we can't disambiguate —
|
||||
// keep the candidate (same as old behavior for NULL resolved_path).
|
||||
if _, hasReverse := s.resolvedPubkeyReverse[tx.ID]; !hasReverse {
|
||||
return true
|
||||
}
|
||||
|
||||
h := resolvedPubkeyHash(targetPK)
|
||||
txIDs := s.resolvedPubkeyIndex[h]
|
||||
|
||||
// Check if this tx's ID is in the candidate list
|
||||
for _, id := range txIDs {
|
||||
if id == tx.ID {
|
||||
// Found in index. Collision-safety: verify with SQL.
|
||||
if s.db != nil && s.db.conn != nil {
|
||||
return s.confirmResolvedPathContains(tx.ID, targetPK)
|
||||
}
|
||||
return true // no DB, trust the index
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// confirmResolvedPathContains verifies an exact pubkey match in resolved_path
|
||||
// via SQL. This is the collision-safety fallback for the membership index.
|
||||
func (s *PacketStore) confirmResolvedPathContains(txID int, pubkey string) bool {
|
||||
if s.db == nil || s.db.conn == nil {
|
||||
return true
|
||||
}
|
||||
// Use INSTR with surrounding quotes for exact match — avoids LIKE escape issues.
|
||||
// resolved_path format: ["pubkey1","pubkey2",...]
|
||||
needle := `"` + strings.ToLower(pubkey) + `"`
|
||||
var count int
|
||||
err := s.db.conn.QueryRow(
|
||||
`SELECT COUNT(*) FROM observations WHERE transmission_id = ? AND INSTR(LOWER(resolved_path), ?) > 0`,
|
||||
txID, needle,
|
||||
).Scan(&count)
|
||||
if err != nil {
|
||||
return true // on error, keep the candidate
|
||||
}
|
||||
return count > 0
|
||||
}
|
||||
|
||||
// fetchResolvedPathsForTx fetches resolved_path from SQLite for all observations
|
||||
// of a transmission. Used for on-demand API responses and eviction cleanup.
|
||||
func (s *PacketStore) fetchResolvedPathsForTx(txID int) map[int][]*string {
|
||||
if s.db == nil || s.db.conn == nil {
|
||||
return nil
|
||||
}
|
||||
rows, err := s.db.conn.Query(
|
||||
`SELECT id, resolved_path FROM observations WHERE transmission_id = ? AND resolved_path IS NOT NULL`,
|
||||
txID,
|
||||
)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
result := make(map[int][]*string)
|
||||
for rows.Next() {
|
||||
var obsID int
|
||||
var rpJSON sql.NullString
|
||||
if err := rows.Scan(&obsID, &rpJSON); err != nil {
|
||||
continue
|
||||
}
|
||||
if rpJSON.Valid && rpJSON.String != "" {
|
||||
result[obsID] = unmarshalResolvedPath(rpJSON.String)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// fetchResolvedPathForObs fetches resolved_path for a single observation,
|
||||
// using the LRU cache.
|
||||
func (s *PacketStore) fetchResolvedPathForObs(obsID int) []*string {
|
||||
if s.db == nil || s.db.conn == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check LRU cache first
|
||||
s.lruMu.RLock()
|
||||
if s.apiResolvedPathLRU != nil {
|
||||
if entry, ok := s.apiResolvedPathLRU[obsID]; ok {
|
||||
s.lruMu.RUnlock()
|
||||
return entry
|
||||
}
|
||||
}
|
||||
s.lruMu.RUnlock()
|
||||
|
||||
var rpJSON sql.NullString
|
||||
err := s.db.conn.QueryRow(
|
||||
`SELECT resolved_path FROM observations WHERE id = ?`, obsID,
|
||||
).Scan(&rpJSON)
|
||||
if err != nil || !rpJSON.Valid {
|
||||
return nil
|
||||
}
|
||||
rp := unmarshalResolvedPath(rpJSON.String)
|
||||
|
||||
// Store in LRU
|
||||
s.lruMu.Lock()
|
||||
s.lruPut(obsID, rp)
|
||||
s.lruMu.Unlock()
|
||||
|
||||
return rp
|
||||
}
|
||||
|
||||
// fetchResolvedPathForTxBest returns the best observation's resolved_path for a tx.
|
||||
//
|
||||
// "Best" = the longest path_json among observations that actually have a stored
|
||||
// resolved_path. Earlier versions picked the longest-path obs unconditionally
|
||||
// and queried SQL for that single ID — if the longest-path obs had NULL
|
||||
// resolved_path while a shorter sibling had one, the call returned nil and
|
||||
// callers (e.g. /api/nodes/{pk}/health.recentPackets) lost the field. Fixes
|
||||
// #810 by checking all observations and falling back to the longest sibling
|
||||
// that has a stored path.
|
||||
func (s *PacketStore) fetchResolvedPathForTxBest(tx *StoreTx) []*string {
|
||||
if tx == nil || len(tx.Observations) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Fast path: try the longest-path obs first via the LRU/SQL helper.
|
||||
longest := tx.Observations[0]
|
||||
longestLen := pathLen(longest.PathJSON)
|
||||
for _, obs := range tx.Observations[1:] {
|
||||
if l := pathLen(obs.PathJSON); l > longestLen {
|
||||
longest = obs
|
||||
longestLen = l
|
||||
}
|
||||
}
|
||||
if rp := s.fetchResolvedPathForObs(longest.ID); rp != nil {
|
||||
return rp
|
||||
}
|
||||
// Fallback: longest-path obs has no stored resolved_path. Query all
|
||||
// observations for this tx and pick the one with the longest path_json
|
||||
// that actually has a stored resolved_path.
|
||||
rpMap := s.fetchResolvedPathsForTx(tx.ID)
|
||||
if len(rpMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
var bestRP []*string
|
||||
bestObsID := 0
|
||||
bestLen := -1
|
||||
for _, obs := range tx.Observations {
|
||||
rp, ok := rpMap[obs.ID]
|
||||
if !ok || rp == nil {
|
||||
continue
|
||||
}
|
||||
if l := pathLen(obs.PathJSON); l > bestLen {
|
||||
bestLen = l
|
||||
bestRP = rp
|
||||
bestObsID = obs.ID
|
||||
}
|
||||
}
|
||||
// Populate LRU so repeat lookups for this tx don't re-issue the multi-row
|
||||
// SQL fallback (e.g. dashboard polling /api/nodes/{pk}/health).
|
||||
if bestRP != nil && bestObsID != 0 {
|
||||
s.lruMu.Lock()
|
||||
s.lruPut(bestObsID, bestRP)
|
||||
s.lruMu.Unlock()
|
||||
}
|
||||
return bestRP
|
||||
}
|
||||
|
||||
// --- Simple LRU cache for resolved paths ---
|
||||
|
||||
const lruMaxSize = 10000
|
||||
|
||||
// lruPut adds an entry. Must be called under s.lruMu write lock.
|
||||
func (s *PacketStore) lruPut(obsID int, rp []*string) {
|
||||
if s.apiResolvedPathLRU == nil {
|
||||
return
|
||||
}
|
||||
if _, exists := s.apiResolvedPathLRU[obsID]; exists {
|
||||
return
|
||||
}
|
||||
// Compact lruOrder if stale entries exceed 50% of capacity.
|
||||
// This prevents effective capacity degradation after bulk deletions.
|
||||
if len(s.lruOrder) >= lruMaxSize && len(s.apiResolvedPathLRU) < lruMaxSize/2 {
|
||||
compacted := make([]int, 0, len(s.apiResolvedPathLRU))
|
||||
for _, id := range s.lruOrder {
|
||||
if _, ok := s.apiResolvedPathLRU[id]; ok {
|
||||
compacted = append(compacted, id)
|
||||
}
|
||||
}
|
||||
s.lruOrder = compacted
|
||||
}
|
||||
if len(s.lruOrder) >= lruMaxSize {
|
||||
// Evict oldest, skipping stale entries
|
||||
for len(s.lruOrder) > 0 {
|
||||
evictID := s.lruOrder[0]
|
||||
s.lruOrder = s.lruOrder[1:]
|
||||
if _, ok := s.apiResolvedPathLRU[evictID]; ok {
|
||||
delete(s.apiResolvedPathLRU, evictID)
|
||||
break
|
||||
}
|
||||
// stale entry — skip and continue
|
||||
}
|
||||
}
|
||||
s.apiResolvedPathLRU[obsID] = rp
|
||||
s.lruOrder = append(s.lruOrder, obsID)
|
||||
}
|
||||
|
||||
// lruDelete removes an entry. Must be called under s.lruMu write lock.
|
||||
func (s *PacketStore) lruDelete(obsID int) {
|
||||
if s.apiResolvedPathLRU == nil {
|
||||
return
|
||||
}
|
||||
delete(s.apiResolvedPathLRU, obsID)
|
||||
// Don't scan lruOrder — eviction handles stale entries naturally.
|
||||
}
|
||||
|
||||
// resolvedPubkeysForEvictionBatch fetches resolved pubkeys for multiple txIDs
|
||||
// from SQL in a single batched query. Returns a map from txID to unique pubkeys.
|
||||
// MUST be called WITHOUT holding s.mu — this is the whole point of the batch approach.
|
||||
// Chunks queries to stay under SQLite's 500-parameter limit.
|
||||
func (s *PacketStore) resolvedPubkeysForEvictionBatch(txIDs []int) map[int][]string {
|
||||
result := make(map[int][]string, len(txIDs))
|
||||
if len(txIDs) == 0 || s.db == nil || s.db.conn == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
const chunkSize = 499 // SQLite SQLITE_MAX_VARIABLE_NUMBER default is 999; stay well under
|
||||
for start := 0; start < len(txIDs); start += chunkSize {
|
||||
end := start + chunkSize
|
||||
if end > len(txIDs) {
|
||||
end = len(txIDs)
|
||||
}
|
||||
chunk := txIDs[start:end]
|
||||
|
||||
// Build query with placeholders
|
||||
placeholders := make([]byte, 0, len(chunk)*2)
|
||||
args := make([]interface{}, len(chunk))
|
||||
for i, id := range chunk {
|
||||
if i > 0 {
|
||||
placeholders = append(placeholders, ',')
|
||||
}
|
||||
placeholders = append(placeholders, '?')
|
||||
args[i] = id
|
||||
}
|
||||
|
||||
query := "SELECT transmission_id, resolved_path FROM observations WHERE transmission_id IN (" +
|
||||
string(placeholders) + ") AND resolved_path IS NOT NULL"
|
||||
|
||||
rows, err := s.db.conn.Query(query, args...)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
var txID int
|
||||
var rpJSON sql.NullString
|
||||
if err := rows.Scan(&txID, &rpJSON); err != nil {
|
||||
continue
|
||||
}
|
||||
if !rpJSON.Valid || rpJSON.String == "" {
|
||||
continue
|
||||
}
|
||||
rp := unmarshalResolvedPath(rpJSON.String)
|
||||
for _, p := range rp {
|
||||
if p != nil && *p != "" {
|
||||
result[txID] = append(result[txID], *p)
|
||||
}
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
|
||||
// Deduplicate per-txID
|
||||
for txID, pks := range result {
|
||||
seen := make(map[string]bool, len(pks))
|
||||
deduped := pks[:0]
|
||||
for _, pk := range pks {
|
||||
if !seen[pk] {
|
||||
seen[pk] = true
|
||||
deduped = append(deduped, pk)
|
||||
}
|
||||
}
|
||||
result[txID] = deduped
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// initResolvedPathIndex initializes the resolved path index data structures.
|
||||
func (s *PacketStore) initResolvedPathIndex() {
|
||||
s.resolvedPubkeyIndex = make(map[uint64][]int, 4096)
|
||||
s.resolvedPubkeyReverse = make(map[int][]uint64, 4096)
|
||||
s.apiResolvedPathLRU = make(map[int][]*string, lruMaxSize)
|
||||
s.lruOrder = make([]int, 0, lruMaxSize)
|
||||
}
|
||||
|
||||
// CompactResolvedPubkeyIndex reclaims memory from the resolved pubkey index maps
|
||||
// after eviction. It removes empty forward-index entries (shouldn't exist if
|
||||
// removeFromResolvedPubkeyIndex is correct, but defense in depth) and clips
|
||||
// oversized slice backing arrays where cap > 2*len.
|
||||
// Must be called under s.mu write lock.
|
||||
func (s *PacketStore) CompactResolvedPubkeyIndex() {
|
||||
if !s.useResolvedPathIndex {
|
||||
return
|
||||
}
|
||||
for h, ids := range s.resolvedPubkeyIndex {
|
||||
if len(ids) == 0 {
|
||||
delete(s.resolvedPubkeyIndex, h)
|
||||
continue
|
||||
}
|
||||
// Clip oversized backing arrays: if cap > 2*len, reallocate.
|
||||
if cap(ids) > 2*len(ids)+8 {
|
||||
clipped := make([]int, len(ids))
|
||||
copy(clipped, ids)
|
||||
s.resolvedPubkeyIndex[h] = clipped
|
||||
}
|
||||
}
|
||||
for txID, hashes := range s.resolvedPubkeyReverse {
|
||||
if len(hashes) == 0 {
|
||||
delete(s.resolvedPubkeyReverse, txID)
|
||||
continue
|
||||
}
|
||||
if cap(hashes) > 2*len(hashes)+8 {
|
||||
clipped := make([]uint64, len(hashes))
|
||||
copy(clipped, hashes)
|
||||
s.resolvedPubkeyReverse[txID] = clipped
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// defaultMaxResolvedPubkeyIndexEntries is the default hard cap for the forward
|
||||
// index. When exceeded, a warning is logged. No auto-eviction — that's the
|
||||
// eviction ticker's job.
|
||||
const defaultMaxResolvedPubkeyIndexEntries = 5_000_000
|
||||
|
||||
// CheckResolvedPubkeyIndexSize logs a warning if the resolved pubkey forward
|
||||
// index exceeds the configured maximum entries. Must be called under s.mu
|
||||
// read lock at minimum.
|
||||
func (s *PacketStore) CheckResolvedPubkeyIndexSize() {
|
||||
if !s.useResolvedPathIndex {
|
||||
return
|
||||
}
|
||||
maxEntries := s.maxResolvedPubkeyIndexEntries
|
||||
if maxEntries <= 0 {
|
||||
maxEntries = defaultMaxResolvedPubkeyIndexEntries
|
||||
}
|
||||
fwdLen := len(s.resolvedPubkeyIndex)
|
||||
revLen := len(s.resolvedPubkeyReverse)
|
||||
if fwdLen > maxEntries || revLen > maxEntries {
|
||||
log.Printf("[store] WARNING: resolvedPubkeyIndex size exceeds limit — forward=%d reverse=%d limit=%d",
|
||||
fwdLen, revLen, maxEntries)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
+33
-417
@@ -16,7 +16,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/meshcore-analyzer/packetpath"
|
||||
)
|
||||
|
||||
// Server holds shared state for route handlers.
|
||||
@@ -125,7 +124,6 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.Handle("/api/perf/reset", s.requireAPIKey(http.HandlerFunc(s.handlePerfReset))).Methods("POST")
|
||||
r.Handle("/api/admin/prune", s.requireAPIKey(http.HandlerFunc(s.handleAdminPrune))).Methods("POST")
|
||||
r.Handle("/api/debug/affinity", s.requireAPIKey(http.HandlerFunc(s.handleDebugAffinity))).Methods("GET")
|
||||
r.Handle("/api/dropped-packets", s.requireAPIKey(http.HandlerFunc(s.handleDroppedPackets))).Methods("GET")
|
||||
|
||||
// Packet endpoints
|
||||
r.HandleFunc("/api/packets/observations", s.handleBatchObservations).Methods("POST")
|
||||
@@ -144,9 +142,6 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/nodes/{pubkey}/health", s.handleNodeHealth).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/paths", s.handleNodePaths).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/analytics", s.handleNodeAnalytics).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/clock-skew", s.handleFleetClockSkew).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/clock-skew", s.handleNodeClockSkew).Methods("GET")
|
||||
r.HandleFunc("/api/observers/clock-skew", s.handleObserverClockSkew).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/neighbors", s.handleNodeNeighbors).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}", s.handleNodeDetail).Methods("GET")
|
||||
r.HandleFunc("/api/nodes", s.handleNodes).Methods("GET")
|
||||
@@ -173,7 +168,6 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/observers/{id}", s.handleObserverDetail).Methods("GET")
|
||||
r.HandleFunc("/api/observers", s.handleObservers).Methods("GET")
|
||||
r.HandleFunc("/api/traces/{hash}", s.handleTraces).Methods("GET")
|
||||
r.HandleFunc("/api/paths/inspect", s.handlePathInspect).Methods("POST")
|
||||
r.HandleFunc("/api/iata-coords", s.handleIATACoords).Methods("GET")
|
||||
r.HandleFunc("/api/audio-lab/buckets", s.handleAudioLabBuckets).Methods("GET")
|
||||
|
||||
@@ -452,12 +446,10 @@ func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
|
||||
// Real packet store stats
|
||||
pktCount := 0
|
||||
var pktEstMB float64
|
||||
var pktTrackedMB float64
|
||||
if s.store != nil {
|
||||
ps := s.store.GetPerfStoreStatsTyped()
|
||||
pktCount = ps.TotalLoaded
|
||||
pktEstMB = ps.EstimatedMB
|
||||
pktTrackedMB = ps.TrackedMB
|
||||
}
|
||||
|
||||
// Real cache stats
|
||||
@@ -523,7 +515,6 @@ func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
|
||||
PacketStore: HealthPacketStoreStats{
|
||||
Packets: pktCount,
|
||||
EstimatedMB: pktEstMB,
|
||||
TrackedMB: pktTrackedMB,
|
||||
},
|
||||
Perf: HealthPerfStats{
|
||||
TotalRequests: int(perfRequests),
|
||||
@@ -571,16 +562,6 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
|
||||
backfillProgress = 1
|
||||
}
|
||||
|
||||
// Memory accounting (#832). storeDataMB is the in-store packet byte
|
||||
// estimate (the old "trackedMB"); processRSSMB / goHeapInuseMB / goSysMB
|
||||
// give ops the breakdown needed to reason about real RSS. All values
|
||||
// share a single 1s-cached snapshot to amortize ReadMemStats cost.
|
||||
var storeDataMB float64
|
||||
if s.store != nil {
|
||||
storeDataMB = s.store.trackedMemoryMB()
|
||||
}
|
||||
mem := s.getMemorySnapshot(storeDataMB)
|
||||
|
||||
resp := &StatsResponse{
|
||||
TotalPackets: stats.TotalPackets,
|
||||
TotalTransmissions: &stats.TotalTransmissions,
|
||||
@@ -600,16 +581,8 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
|
||||
Companions: counts["companions"],
|
||||
Sensors: counts["sensors"],
|
||||
},
|
||||
Backfilling: backfilling,
|
||||
BackfillProgress: backfillProgress,
|
||||
SignatureDrops: s.db.GetSignatureDropCount(),
|
||||
HashMigrationComplete: s.store != nil && s.store.hashMigrationComplete.Load(),
|
||||
|
||||
TrackedMB: mem.StoreDataMB, // deprecated alias
|
||||
StoreDataMB: mem.StoreDataMB,
|
||||
ProcessRSSMB: mem.ProcessRSSMB,
|
||||
GoHeapInuseMB: mem.GoHeapInuseMB,
|
||||
GoSysMB: mem.GoSysMB,
|
||||
Backfilling: backfilling,
|
||||
BackfillProgress: backfillProgress,
|
||||
}
|
||||
|
||||
s.statsMu.Lock()
|
||||
@@ -792,7 +765,6 @@ func (s *Server) handlePackets(w http.ResponseWriter, r *http.Request) {
|
||||
Until: r.URL.Query().Get("until"),
|
||||
Region: r.URL.Query().Get("region"),
|
||||
Node: r.URL.Query().Get("node"),
|
||||
Channel: r.URL.Query().Get("channel"),
|
||||
Order: "DESC",
|
||||
ExpandObservations: r.URL.Query().Get("expand") == "observations",
|
||||
}
|
||||
@@ -895,11 +867,9 @@ func (s *Server) handleBatchObservations(w http.ResponseWriter, r *http.Request)
|
||||
func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
|
||||
param := mux.Vars(r)["id"]
|
||||
var packet map[string]interface{}
|
||||
fromDB := false
|
||||
|
||||
isHash := hashPattern.MatchString(strings.ToLower(param))
|
||||
if s.store != nil {
|
||||
if isHash {
|
||||
if hashPattern.MatchString(strings.ToLower(param)) {
|
||||
packet = s.store.GetPacketByHash(param)
|
||||
}
|
||||
if packet == nil {
|
||||
@@ -912,25 +882,6 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// DB fallback: in-memory PacketStore prunes old entries, but the SQLite
|
||||
// DB retains them and is the source for /api/nodes recentAdverts. Without
|
||||
// this fallback, links from node-detail pages 404 once the packet ages out.
|
||||
if packet == nil && s.db != nil {
|
||||
if isHash {
|
||||
if dbPkt, err := s.db.GetPacketByHash(param); err == nil && dbPkt != nil {
|
||||
packet = dbPkt
|
||||
fromDB = true
|
||||
}
|
||||
}
|
||||
if packet == nil {
|
||||
if id, parseErr := strconv.Atoi(param); parseErr == nil {
|
||||
if dbPkt, err := s.db.GetTransmissionByID(id); err == nil && dbPkt != nil {
|
||||
packet = dbPkt
|
||||
fromDB = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if packet == nil {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
@@ -941,9 +892,6 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
|
||||
if s.store != nil {
|
||||
observations = s.store.GetObservationsForHash(hash)
|
||||
}
|
||||
if len(observations) == 0 && fromDB && s.db != nil && hash != "" {
|
||||
observations = s.db.GetObservationsForHash(hash)
|
||||
}
|
||||
observationCount := len(observations)
|
||||
if observationCount == 0 {
|
||||
observationCount = 1
|
||||
@@ -959,9 +907,11 @@ func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
|
||||
pathHops = []interface{}{}
|
||||
}
|
||||
|
||||
rawHex, _ := packet["raw_hex"].(string)
|
||||
writeJSON(w, PacketDetailResponse{
|
||||
Packet: packet,
|
||||
Path: pathHops,
|
||||
Breakdown: BuildBreakdown(rawHex),
|
||||
ObservationCount: observationCount,
|
||||
Observations: mapSliceToObservations(observations),
|
||||
})
|
||||
@@ -980,7 +930,7 @@ func (s *Server) handleDecode(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, 400, "hex is required")
|
||||
return
|
||||
}
|
||||
decoded, err := DecodePacket(hexStr, true)
|
||||
decoded, err := DecodePacket(hexStr)
|
||||
if err != nil {
|
||||
writeError(w, 400, err.Error())
|
||||
return
|
||||
@@ -1012,7 +962,7 @@ func (s *Server) handlePostPacket(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, 400, "hex is required")
|
||||
return
|
||||
}
|
||||
decoded, err := DecodePacket(hexStr, false)
|
||||
decoded, err := DecodePacket(hexStr)
|
||||
if err != nil {
|
||||
writeError(w, 400, err.Error())
|
||||
return
|
||||
@@ -1020,17 +970,8 @@ func (s *Server) handlePostPacket(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
contentHash := ComputeContentHash(hexStr)
|
||||
pathJSON := "[]"
|
||||
// For TRACE packets, path_json must be the payload-decoded route hops
|
||||
// (decoded.Path.Hops), NOT the raw_hex header bytes which are SNR values.
|
||||
// For all other packet types, derive path from raw_hex (#886).
|
||||
if !packetpath.PathBytesAreHops(byte(decoded.Header.PayloadType)) {
|
||||
if len(decoded.Path.Hops) > 0 {
|
||||
if pj, e := json.Marshal(decoded.Path.Hops); e == nil {
|
||||
pathJSON = string(pj)
|
||||
}
|
||||
}
|
||||
} else if hops, err := packetpath.DecodePathFromRawHex(hexStr); err == nil && len(hops) > 0 {
|
||||
if pj, e := json.Marshal(hops); e == nil {
|
||||
if len(decoded.Path.Hops) > 0 {
|
||||
if pj, e := json.Marshal(decoded.Path.Hops); e == nil {
|
||||
pathJSON = string(pj)
|
||||
}
|
||||
}
|
||||
@@ -1104,17 +1045,6 @@ func (s *Server) handleNodes(w http.ResponseWriter, r *http.Request) {
|
||||
total = len(filtered)
|
||||
nodes = filtered
|
||||
}
|
||||
// Filter blacklisted nodes
|
||||
if len(s.cfg.NodeBlacklist) > 0 {
|
||||
filtered := nodes[:0]
|
||||
for _, node := range nodes {
|
||||
if pk, ok := node["public_key"].(string); !ok || !s.cfg.IsBlacklisted(pk) {
|
||||
filtered = append(filtered, node)
|
||||
}
|
||||
}
|
||||
total = len(filtered)
|
||||
nodes = filtered
|
||||
}
|
||||
writeJSON(w, NodeListResponse{Nodes: nodes, Total: total, Counts: counts})
|
||||
}
|
||||
|
||||
@@ -1129,25 +1059,11 @@ func (s *Server) handleNodeSearch(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
// Filter blacklisted nodes from search results
|
||||
if len(s.cfg.NodeBlacklist) > 0 {
|
||||
filtered := make([]map[string]interface{}, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
if pk, ok := node["public_key"].(string); !ok || !s.cfg.IsBlacklisted(pk) {
|
||||
filtered = append(filtered, node)
|
||||
}
|
||||
}
|
||||
nodes = filtered
|
||||
}
|
||||
writeJSON(w, NodeSearchResponse{Nodes: nodes})
|
||||
}
|
||||
|
||||
func (s *Server) handleNodeDetail(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := mux.Vars(r)["pubkey"]
|
||||
if s.cfg.IsBlacklisted(pubkey) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
node, err := s.db.GetNodeByPubkey(pubkey)
|
||||
if err != nil || node == nil {
|
||||
writeError(w, 404, "Not found")
|
||||
@@ -1173,10 +1089,6 @@ func (s *Server) handleNodeDetail(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *Server) handleNodeHealth(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := mux.Vars(r)["pubkey"]
|
||||
if s.cfg.IsBlacklisted(pubkey) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
if s.store != nil {
|
||||
result, err := s.store.GetNodeHealth(pubkey)
|
||||
if err != nil || result == nil {
|
||||
@@ -1197,19 +1109,7 @@ func (s *Server) handleBulkHealth(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if s.store != nil {
|
||||
region := r.URL.Query().Get("region")
|
||||
results := s.store.GetBulkHealth(limit, region)
|
||||
// Filter blacklisted nodes
|
||||
if len(s.cfg.NodeBlacklist) > 0 {
|
||||
filtered := make([]map[string]interface{}, 0, len(results))
|
||||
for _, entry := range results {
|
||||
if pk, ok := entry["public_key"].(string); !ok || !s.cfg.IsBlacklisted(pk) {
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
}
|
||||
writeJSON(w, filtered)
|
||||
return
|
||||
}
|
||||
writeJSON(w, results)
|
||||
writeJSON(w, s.store.GetBulkHealth(limit, region))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1228,10 +1128,6 @@ func (s *Server) handleNetworkStatus(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := mux.Vars(r)["pubkey"]
|
||||
if s.cfg.IsBlacklisted(pubkey) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
node, err := s.db.GetNodeByPubkey(pubkey)
|
||||
if err != nil || node == nil {
|
||||
writeError(w, 404, "Not found")
|
||||
@@ -1280,55 +1176,6 @@ func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Post-filter: verify target node actually appears in each candidate's resolved_path.
|
||||
// The byPathHop index uses short prefixes which can collide (e.g. "c0" matches multiple nodes).
|
||||
// We lean on resolved_path (from neighbor affinity graph) to disambiguate.
|
||||
//
|
||||
// Collect candidate IDs and index membership under the read lock, then release
|
||||
// the lock before running SQL queries (confirmResolvedPathContains does disk I/O).
|
||||
type candidateCheck struct {
|
||||
tx *StoreTx
|
||||
hasReverse bool
|
||||
inIndex bool
|
||||
}
|
||||
checks := make([]candidateCheck, len(candidates))
|
||||
for i, tx := range candidates {
|
||||
cc := candidateCheck{tx: tx}
|
||||
if !s.store.useResolvedPathIndex {
|
||||
cc.inIndex = true // flag off — keep all
|
||||
} else if _, hasRev := s.store.resolvedPubkeyReverse[tx.ID]; !hasRev {
|
||||
cc.inIndex = true // no indexed pubkeys — keep (conservative)
|
||||
} else {
|
||||
h := resolvedPubkeyHash(lowerPK)
|
||||
for _, id := range s.store.resolvedPubkeyIndex[h] {
|
||||
if id == tx.ID {
|
||||
cc.hasReverse = true // needs SQL confirmation
|
||||
break
|
||||
}
|
||||
}
|
||||
// If not in index at all, it's a definite no
|
||||
}
|
||||
checks[i] = cc
|
||||
}
|
||||
s.store.mu.RUnlock()
|
||||
|
||||
// Now run SQL checks outside the lock for candidates that need confirmation.
|
||||
filtered := candidates[:0]
|
||||
for _, cc := range checks {
|
||||
if cc.inIndex {
|
||||
filtered = append(filtered, cc.tx)
|
||||
} else if cc.hasReverse {
|
||||
if s.store.confirmResolvedPathContains(cc.tx.ID, lowerPK) {
|
||||
filtered = append(filtered, cc.tx)
|
||||
}
|
||||
}
|
||||
// else: not in index → exclude
|
||||
}
|
||||
candidates = filtered
|
||||
|
||||
// Re-acquire read lock for the aggregation phase that reads store data.
|
||||
s.store.mu.RLock()
|
||||
|
||||
type pathAgg struct {
|
||||
Hops []PathHopResp
|
||||
Count int
|
||||
@@ -1433,10 +1280,6 @@ func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *Server) handleNodeAnalytics(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := mux.Vars(r)["pubkey"]
|
||||
if s.cfg.IsBlacklisted(pubkey) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
days := queryInt(r, "days", 7)
|
||||
if days < 1 {
|
||||
days = 1
|
||||
@@ -1458,36 +1301,6 @@ func (s *Server) handleNodeAnalytics(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, 404, "Not found")
|
||||
}
|
||||
|
||||
func (s *Server) handleNodeClockSkew(w http.ResponseWriter, r *http.Request) {
|
||||
pubkey := mux.Vars(r)["pubkey"]
|
||||
if s.store == nil {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
result := s.store.GetNodeClockSkew(pubkey)
|
||||
if result == nil {
|
||||
writeError(w, 404, "No clock skew data for this node")
|
||||
return
|
||||
}
|
||||
writeJSON(w, result)
|
||||
}
|
||||
|
||||
func (s *Server) handleObserverClockSkew(w http.ResponseWriter, r *http.Request) {
|
||||
if s.store == nil {
|
||||
writeJSON(w, []ObserverCalibration{})
|
||||
return
|
||||
}
|
||||
writeJSON(w, s.store.GetObserverCalibrations())
|
||||
}
|
||||
|
||||
func (s *Server) handleFleetClockSkew(w http.ResponseWriter, r *http.Request) {
|
||||
if s.store == nil {
|
||||
writeJSON(w, []*NodeClockSkew{})
|
||||
return
|
||||
}
|
||||
writeJSON(w, s.store.GetFleetClockSkew())
|
||||
}
|
||||
|
||||
// --- Analytics Handlers ---
|
||||
|
||||
func (s *Server) handleAnalyticsRF(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -1513,11 +1326,7 @@ func (s *Server) handleAnalyticsRF(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *Server) handleAnalyticsTopology(w http.ResponseWriter, r *http.Request) {
|
||||
region := r.URL.Query().Get("region")
|
||||
if s.store != nil {
|
||||
data := s.store.GetAnalyticsTopology(region)
|
||||
if s.cfg != nil && len(s.cfg.NodeBlacklist) > 0 {
|
||||
data = s.filterBlacklistedFromTopology(data)
|
||||
}
|
||||
writeJSON(w, data)
|
||||
writeJSON(w, s.store.GetAnalyticsTopology(region))
|
||||
return
|
||||
}
|
||||
writeJSON(w, TopologyResponse{
|
||||
@@ -1605,11 +1414,7 @@ func (s *Server) handleAnalyticsSubpaths(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
maxLen := queryInt(r, "maxLen", 8)
|
||||
limit := queryInt(r, "limit", 100)
|
||||
data := s.store.GetAnalyticsSubpaths(region, minLen, maxLen, limit)
|
||||
if s.cfg != nil && len(s.cfg.NodeBlacklist) > 0 {
|
||||
data = s.filterBlacklistedFromSubpaths(data)
|
||||
}
|
||||
writeJSON(w, data)
|
||||
writeJSON(w, s.store.GetAnalyticsSubpaths(region, minLen, maxLen, limit))
|
||||
return
|
||||
}
|
||||
writeJSON(w, SubpathsResponse{
|
||||
@@ -1661,11 +1466,6 @@ func (s *Server) handleAnalyticsSubpathsBulk(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
|
||||
results := s.store.GetAnalyticsSubpathsBulk(region, groups)
|
||||
if s.cfg != nil && len(s.cfg.NodeBlacklist) > 0 {
|
||||
for i, r := range results {
|
||||
results[i] = s.filterBlacklistedFromSubpaths(r)
|
||||
}
|
||||
}
|
||||
writeJSON(w, map[string]interface{}{"results": results})
|
||||
}
|
||||
|
||||
@@ -1685,15 +1485,6 @@ func (s *Server) handleAnalyticsSubpathDetail(w http.ResponseWriter, r *http.Req
|
||||
writeJSON(w, ErrorResp{Error: "Need at least 2 hops"})
|
||||
return
|
||||
}
|
||||
// Reject if any hop is a blacklisted node.
|
||||
if s.cfg != nil && len(s.cfg.NodeBlacklist) > 0 {
|
||||
for _, hop := range rawHops {
|
||||
if s.cfg.IsBlacklisted(hop) {
|
||||
writeError(w, 404, "Not found")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.store != nil {
|
||||
writeJSON(w, s.store.GetSubpathDetail(rawHops))
|
||||
return
|
||||
@@ -1759,10 +1550,6 @@ func (s *Server) handleResolveHops(w http.ResponseWriter, r *http.Request) {
|
||||
if pm != nil {
|
||||
if matched, ok := pm.m[hopLower]; ok {
|
||||
for _, ni := range matched {
|
||||
// Skip blacklisted nodes from resolution results.
|
||||
if s.cfg != nil && s.cfg.IsBlacklisted(ni.PublicKey) {
|
||||
continue
|
||||
}
|
||||
c := HopCandidate{Pubkey: ni.PublicKey}
|
||||
if ni.Name != "" {
|
||||
c.Name = ni.Name
|
||||
@@ -1831,8 +1618,7 @@ func (s *Server) handleResolveHops(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Use the resolved node as the default (best-effort pick).
|
||||
// Skip if the best pick is a blacklisted node.
|
||||
if best != nil && !(s.cfg != nil && s.cfg.IsBlacklisted(best.PublicKey)) {
|
||||
if best != nil {
|
||||
hr.Name = best.Name
|
||||
hr.Pubkey = best.PublicKey
|
||||
}
|
||||
@@ -1855,35 +1641,18 @@ func (s *Server) handleResolveHops(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *Server) handleChannels(w http.ResponseWriter, r *http.Request) {
|
||||
region := r.URL.Query().Get("region")
|
||||
includeEncrypted := r.URL.Query().Get("includeEncrypted") == "true"
|
||||
// Prefer DB for full history (in-memory store has limited retention)
|
||||
if s.db != nil {
|
||||
channels, err := s.db.GetChannels(region)
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
if includeEncrypted {
|
||||
encrypted, err := s.db.GetEncryptedChannels(region)
|
||||
if err != nil {
|
||||
log.Printf("WARN GetEncryptedChannels: %v", err)
|
||||
} else {
|
||||
channels = append(channels, encrypted...)
|
||||
}
|
||||
}
|
||||
writeJSON(w, ChannelListResponse{Channels: channels})
|
||||
return
|
||||
}
|
||||
if s.store != nil {
|
||||
region := r.URL.Query().Get("region")
|
||||
channels := s.store.GetChannels(region)
|
||||
if includeEncrypted {
|
||||
channels = append(channels, s.store.GetEncryptedChannels(region)...)
|
||||
}
|
||||
writeJSON(w, ChannelListResponse{Channels: channels})
|
||||
return
|
||||
}
|
||||
writeJSON(w, ChannelListResponse{Channels: []map[string]interface{}{}})
|
||||
channels, err := s.db.GetChannels()
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, ChannelListResponse{Channels: channels})
|
||||
}
|
||||
|
||||
func (s *Server) handleChannelMessages(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -1891,22 +1660,17 @@ func (s *Server) handleChannelMessages(w http.ResponseWriter, r *http.Request) {
|
||||
limit := queryInt(r, "limit", 100)
|
||||
offset := queryInt(r, "offset", 0)
|
||||
region := r.URL.Query().Get("region")
|
||||
// Prefer DB for full history (in-memory store has limited retention)
|
||||
if s.db != nil {
|
||||
messages, total, err := s.db.GetChannelMessages(hash, limit, offset, region)
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, ChannelMessagesResponse{Messages: messages, Total: total})
|
||||
return
|
||||
}
|
||||
if s.store != nil {
|
||||
messages, total := s.store.GetChannelMessages(hash, limit, offset, region)
|
||||
writeJSON(w, ChannelMessagesResponse{Messages: messages, Total: total})
|
||||
return
|
||||
}
|
||||
writeJSON(w, ChannelMessagesResponse{Messages: []map[string]interface{}{}, Total: 0})
|
||||
messages, total, err := s.db.GetChannelMessages(hash, limit, offset, region)
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, ChannelMessagesResponse{Messages: messages, Total: total})
|
||||
}
|
||||
|
||||
func (s *Server) handleObservers(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -2375,6 +2139,9 @@ func mapSliceToTransmissions(maps []map[string]interface{}) []TransmissionResp {
|
||||
tx.PathJSON = m["path_json"]
|
||||
tx.Direction = m["direction"]
|
||||
tx.Score = m["score"]
|
||||
if rp, ok := m["resolved_path"].([]*string); ok {
|
||||
tx.ResolvedPath = rp
|
||||
}
|
||||
result = append(result, tx)
|
||||
}
|
||||
return result
|
||||
@@ -2395,10 +2162,10 @@ func mapSliceToObservations(maps []map[string]interface{}) []ObservationResp {
|
||||
obs.SNR = m["snr"]
|
||||
obs.RSSI = m["rssi"]
|
||||
obs.PathJSON = m["path_json"]
|
||||
obs.ResolvedPath = m["resolved_path"]
|
||||
obs.Direction = m["direction"]
|
||||
obs.RawHex = m["raw_hex"]
|
||||
obs.Timestamp = m["timestamp"]
|
||||
if rp, ok := m["resolved_path"].([]*string); ok {
|
||||
obs.ResolvedPath = rp
|
||||
}
|
||||
result = append(result, obs)
|
||||
}
|
||||
return result
|
||||
@@ -2549,167 +2316,16 @@ func (s *Server) handleAdminPrune(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, 400, "days parameter required (or set retention.packetDays in config)")
|
||||
return
|
||||
}
|
||||
|
||||
results := map[string]interface{}{}
|
||||
|
||||
// Prune old packets
|
||||
n, err := s.db.PruneOldPackets(days)
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
|
||||
results["packets_deleted"] = n
|
||||
results["deleted"] = n // legacy alias
|
||||
|
||||
// Also mark stale observers as inactive if observerDays is configured
|
||||
observerDays := s.cfg.ObserverDaysOrDefault()
|
||||
if observerDays > 0 {
|
||||
obsN, obsErr := s.db.RemoveStaleObservers(observerDays)
|
||||
if obsErr != nil {
|
||||
log.Printf("[prune] observer prune error: %v", obsErr)
|
||||
} else {
|
||||
results["observers_inactive"] = obsN
|
||||
}
|
||||
}
|
||||
|
||||
results["days"] = days
|
||||
writeJSON(w, results)
|
||||
writeJSON(w, map[string]interface{}{"deleted": n, "days": days})
|
||||
}
|
||||
|
||||
// constantTimeEqual compares two strings in constant time to prevent timing attacks.
|
||||
func constantTimeEqual(a, b string) bool {
|
||||
return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
|
||||
}
|
||||
|
||||
// filterBlacklistedFromTopology removes blacklisted node references from the
|
||||
// topology analytics response (TopRepeaters, TopPairs, BestPathList, MultiObsNodes, PerObserverReach).
|
||||
func (s *Server) filterBlacklistedFromTopology(data map[string]interface{}) map[string]interface{} {
|
||||
// Filter TopRepeaters
|
||||
if repeaters, ok := data["topRepeaters"]; ok {
|
||||
if arr, ok := repeaters.([]TopRepeater); ok {
|
||||
var filtered []TopRepeater
|
||||
for _, r := range arr {
|
||||
if pk, ok := r.Pubkey.(string); ok && s.cfg.IsBlacklisted(pk) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, r)
|
||||
}
|
||||
data["topRepeaters"] = filtered
|
||||
}
|
||||
}
|
||||
|
||||
// Filter TopPairs
|
||||
if pairs, ok := data["topPairs"]; ok {
|
||||
if arr, ok := pairs.([]TopPair); ok {
|
||||
var filtered []TopPair
|
||||
for _, p := range arr {
|
||||
if pkA, ok := p.PubkeyA.(string); ok && s.cfg.IsBlacklisted(pkA) {
|
||||
continue
|
||||
}
|
||||
if pkB, ok := p.PubkeyB.(string); ok && s.cfg.IsBlacklisted(pkB) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, p)
|
||||
}
|
||||
data["topPairs"] = filtered
|
||||
}
|
||||
}
|
||||
|
||||
// Filter BestPathList
|
||||
if paths, ok := data["bestPathList"]; ok {
|
||||
if arr, ok := paths.([]BestPathEntry); ok {
|
||||
var filtered []BestPathEntry
|
||||
for _, p := range arr {
|
||||
if pk, ok := p.Pubkey.(string); ok && s.cfg.IsBlacklisted(pk) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, p)
|
||||
}
|
||||
data["bestPathList"] = filtered
|
||||
}
|
||||
}
|
||||
|
||||
// Filter MultiObsNodes
|
||||
if nodes, ok := data["multiObsNodes"]; ok {
|
||||
if arr, ok := nodes.([]MultiObsNode); ok {
|
||||
var filtered []MultiObsNode
|
||||
for _, n := range arr {
|
||||
if pk, ok := n.Pubkey.(string); ok && s.cfg.IsBlacklisted(pk) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, n)
|
||||
}
|
||||
data["multiObsNodes"] = filtered
|
||||
}
|
||||
}
|
||||
|
||||
// Filter PerObserverReach
|
||||
if reach, ok := data["perObserverReach"]; ok {
|
||||
if m, ok := reach.(map[string]*ObserverReach); ok {
|
||||
for k, v := range m {
|
||||
for ri := range v.Rings {
|
||||
var filteredNodes []ReachNode
|
||||
for _, rn := range v.Rings[ri].Nodes {
|
||||
if pk, ok := rn.Pubkey.(string); ok && s.cfg.IsBlacklisted(pk) {
|
||||
continue
|
||||
}
|
||||
filteredNodes = append(filteredNodes, rn)
|
||||
}
|
||||
v.Rings[ri].Nodes = filteredNodes
|
||||
}
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
// filterBlacklistedFromSubpaths removes blacklisted node references from
|
||||
// the subpaths analytics response.
|
||||
func (s *Server) filterBlacklistedFromSubpaths(data map[string]interface{}) map[string]interface{} {
|
||||
if subpaths, ok := data["subpaths"]; ok {
|
||||
if arr, ok := subpaths.([]interface{}); ok {
|
||||
var filtered []interface{}
|
||||
for _, item := range arr {
|
||||
if m, ok := item.(map[string]interface{}); ok {
|
||||
if hops, ok := m["hops"].([]interface{}); ok {
|
||||
skip := false
|
||||
for _, h := range hops {
|
||||
if hp, ok := h.(string); ok && s.cfg.IsBlacklisted(hp) {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if skip {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
filtered = append(filtered, item)
|
||||
}
|
||||
data["subpaths"] = filtered
|
||||
}
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// handleDroppedPackets returns recently dropped packets for investigation.
|
||||
func (s *Server) handleDroppedPackets(w http.ResponseWriter, r *http.Request) {
|
||||
limit := 100
|
||||
if v := r.URL.Query().Get("limit"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n > 0 {
|
||||
limit = n
|
||||
}
|
||||
}
|
||||
observerID := r.URL.Query().Get("observer")
|
||||
nodePubkey := r.URL.Query().Get("pubkey")
|
||||
|
||||
results, err := s.db.GetDroppedPackets(limit, observerID, nodePubkey)
|
||||
if err != nil {
|
||||
writeError(w, 500, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, results)
|
||||
}
|
||||
|
||||
+16
-367
@@ -6,7 +6,6 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -170,9 +169,6 @@ func TestHealthEndpoint(t *testing.T) {
|
||||
if _, ok := pktStore["estimatedMB"]; !ok {
|
||||
t.Error("expected estimatedMB in packetStore")
|
||||
}
|
||||
if _, ok := pktStore["trackedMB"]; !ok {
|
||||
t.Error("expected trackedMB in packetStore")
|
||||
}
|
||||
|
||||
// Verify eventLoop (GC pause metrics matching Node.js shape)
|
||||
el, ok := body["eventLoop"].(map[string]interface{})
|
||||
@@ -777,67 +773,6 @@ func TestNodeHealthNotFound(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestNodeHealthPartialFromPackets verifies that a node with packets in the
|
||||
// in-memory store but no DB entry returns a partial 200 response instead of 404.
|
||||
// This is the fix for issue #665 (companion nodes without adverts).
|
||||
func TestNodeHealthPartialFromPackets(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
// Inject a packet into byNode for a pubkey that doesn't exist in the nodes table
|
||||
ghostPubkey := "ghost_companion_no_advert"
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
snr := 5.0
|
||||
srv.store.mu.Lock()
|
||||
if srv.store.byNode == nil {
|
||||
srv.store.byNode = make(map[string][]*StoreTx)
|
||||
}
|
||||
if srv.store.nodeHashes == nil {
|
||||
srv.store.nodeHashes = make(map[string]map[string]bool)
|
||||
}
|
||||
srv.store.byNode[ghostPubkey] = []*StoreTx{
|
||||
{Hash: "abc123", FirstSeen: now, SNR: &snr, ObservationCount: 1},
|
||||
}
|
||||
srv.store.nodeHashes[ghostPubkey] = map[string]bool{"abc123": true}
|
||||
srv.store.mu.Unlock()
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes/"+ghostPubkey+"/health", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200 for ghost companion, got %d (body: %s)", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatalf("json unmarshal: %v", err)
|
||||
}
|
||||
|
||||
// Should have a synthetic node stub
|
||||
node, ok := body["node"].(map[string]interface{})
|
||||
if !ok || node == nil {
|
||||
t.Fatal("expected node in response")
|
||||
}
|
||||
if node["role"] != "unknown" {
|
||||
t.Errorf("expected role=unknown, got %v", node["role"])
|
||||
}
|
||||
if node["public_key"] != ghostPubkey {
|
||||
t.Errorf("expected public_key=%s, got %v", ghostPubkey, node["public_key"])
|
||||
}
|
||||
|
||||
// Should have stats from the packet
|
||||
stats, ok := body["stats"].(map[string]interface{})
|
||||
if !ok || stats == nil {
|
||||
t.Fatal("expected stats in response")
|
||||
}
|
||||
if stats["totalPackets"] != 1.0 { // JSON numbers are float64
|
||||
t.Errorf("expected totalPackets=1, got %v", stats["totalPackets"])
|
||||
}
|
||||
if stats["lastHeard"] == nil {
|
||||
t.Error("expected lastHeard to be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBulkHealthEndpoint(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/nodes/bulk-health?limit=10", nil)
|
||||
@@ -2219,8 +2154,8 @@ pk := "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'TestNode', 'repeater')", pk)
|
||||
|
||||
decoded := `{"name":"TestNode","pubKey":"` + pk + `"}`
|
||||
raw1 := "11" + "01" + "aabb"
|
||||
raw2 := "11" + "41" + "aabb"
|
||||
raw1 := "04" + "00" + "aabb"
|
||||
raw2 := "04" + "40" + "aabb"
|
||||
|
||||
payloadType := 4
|
||||
for i := 0; i < 3; i++ {
|
||||
@@ -2267,8 +2202,8 @@ pk := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'Repeater2B', 'repeater')", pk)
|
||||
|
||||
decoded := `{"name":"Repeater2B","pubKey":"` + pk + `"}`
|
||||
raw1byte := "11" + "01" + "aabb" // FLOOD, pathByte=0x01 → hashSize=1
|
||||
raw2byte := "11" + "41" + "aabb" // FLOOD, pathByte=0x41 → hashSize=2
|
||||
raw1byte := "04" + "00" + "aabb" // pathByte=0x00 → hashSize=1 (direct send, no hops)
|
||||
raw2byte := "04" + "40" + "aabb" // pathByte=0x40 → hashSize=2
|
||||
|
||||
payloadType := 4
|
||||
// 1 packet with hashSize=1, 4 packets with hashSize=2 (latest is 2-byte)
|
||||
@@ -2310,8 +2245,8 @@ func TestGetNodeHashSizeInfoLatestWins(t *testing.T) {
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'LatestWins', 'repeater')", pk)
|
||||
|
||||
decoded := `{"name":"LatestWins","pubKey":"` + pk + `"}`
|
||||
raw1byte := "11" + "01" + "aabb" // FLOOD, pathByte=0x01 → hashSize=1
|
||||
raw2byte := "11" + "41" + "aabb" // FLOOD, pathByte=0x41 → hashSize=2
|
||||
raw1byte := "04" + "00" + "aabb" // pathByte=0x00 → hashSize=1
|
||||
raw2byte := "04" + "40" + "aabb" // pathByte=0x40 → hashSize=2
|
||||
|
||||
payloadType := 4
|
||||
// 4 historical 1-byte adverts, then 1 recent 2-byte advert (latest).
|
||||
@@ -2516,7 +2451,6 @@ func TestHashAnalyticsZeroHopAdvert(t *testing.T) {
|
||||
|
||||
pk := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'ZeroHop', 'repeater')", pk)
|
||||
store.InvalidateNodeCache()
|
||||
|
||||
decoded := `{"name":"ZeroHop","pubKey":"` + pk + `"}`
|
||||
// header 0x05 → routeType=1 (FLOOD), pathByte=0x00 → hashSize=1
|
||||
@@ -2570,11 +2504,6 @@ func TestAnalyticsHashSizeSameNameDifferentPubkey(t *testing.T) {
|
||||
pk1 := "aaaa111122223333444455556666777788889999aaaabbbbccccddddeeee1111"
|
||||
pk2 := "aaaa111122223333444455556666777788889999aaaabbbbccccddddeeee2222"
|
||||
|
||||
// Insert both nodes as repeaters so they appear in distributionByRepeaters.
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'SameName', 'repeater')", pk1)
|
||||
db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'SameName', 'repeater')", pk2)
|
||||
store.InvalidateNodeCache()
|
||||
|
||||
decoded1 := `{"name":"SameName","pubKey":"` + pk1 + `"}`
|
||||
decoded2 := `{"name":"SameName","pubKey":"` + pk2 + `"}`
|
||||
|
||||
@@ -3603,133 +3532,29 @@ func TestNodePathsEndpointUsesIndex(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodePathsPrefixCollisionFilter(t *testing.T) {
|
||||
// Two nodes share the "aa" prefix: TestRepeater (aabbccdd11223344) and a
|
||||
// second node (aacafe0000000000). Packets whose resolved_path points to
|
||||
// the second node must NOT appear when querying TestRepeater's paths.
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
// Manually inject a transmission whose raw path contains "aa" but whose
|
||||
// resolved_path points to the other node (aacafe0000000000).
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-30 * time.Minute).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-30 * time.Minute).Unix()
|
||||
|
||||
// Insert a second node with the same 2-char prefix
|
||||
srv.db.conn.Exec(`INSERT OR IGNORE INTO nodes (public_key, name, role, last_seen, first_seen, advert_count)
|
||||
VALUES ('aacafe0000000000', 'CollisionNode', 'repeater', ?, '2026-01-01T00:00:00Z', 5)`, recent)
|
||||
|
||||
// Insert a transmission with path hop "aa" that resolves to the OTHER node
|
||||
srv.db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('FF01', 'collision_test_hash', ?, 1, 4, '{}')`, recent)
|
||||
// Get its ID
|
||||
var collisionTxID int
|
||||
srv.db.conn.QueryRow(`SELECT id FROM transmissions WHERE hash='collision_test_hash'`).Scan(&collisionTxID)
|
||||
|
||||
srv.db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp, resolved_path)
|
||||
VALUES (?, 1, 10.0, -90, '["aa","bb"]', ?, '["aacafe0000000000","eeff00112233aabb"]')`,
|
||||
collisionTxID, recentEpoch)
|
||||
|
||||
// Reload store to pick up new data
|
||||
store := NewPacketStore(srv.db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatalf("store.Load failed: %v", err)
|
||||
}
|
||||
srv.store = store
|
||||
|
||||
// Query paths for TestRepeater — should NOT include the collision packet
|
||||
req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/paths", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp struct {
|
||||
Paths []json.RawMessage `json:"paths"`
|
||||
TotalTransmissions int `json:"totalTransmissions"`
|
||||
}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("bad JSON: %v", err)
|
||||
}
|
||||
|
||||
// The collision packet should be filtered out. Only transmission 1 (and 3
|
||||
// if prefix matches) should remain — but transmission 3 has path "cc" and
|
||||
// resolved_path pointing to TestRoom, so only tx 1 should match.
|
||||
// Check that collision_test_hash is not in any path group.
|
||||
bodyStr := w.Body.String()
|
||||
if strings.Contains(bodyStr, "collision_test_hash") {
|
||||
t.Error("collision packet should have been filtered out but appeared in response")
|
||||
}
|
||||
|
||||
// Query paths for CollisionNode — should include the collision packet
|
||||
req2 := httptest.NewRequest("GET", "/api/nodes/aacafe0000000000/paths", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
router.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != 200 {
|
||||
t.Fatalf("expected 200 for CollisionNode, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
body2 := w2.Body.String()
|
||||
if !strings.Contains(body2, "collision_test_hash") {
|
||||
t.Error("collision packet should appear for CollisionNode but was missing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeInResolvedPath(t *testing.T) {
|
||||
target := "aabbccdd11223344"
|
||||
|
||||
// After #800, nodeInResolvedPath is replaced by nodeInResolvedPathViaIndex
|
||||
// which uses the membership index. Test the index-based approach.
|
||||
store := &PacketStore{
|
||||
byNode: make(map[string][]*StoreTx),
|
||||
nodeHashes: make(map[string]map[string]bool),
|
||||
useResolvedPathIndex: true,
|
||||
}
|
||||
store.initResolvedPathIndex()
|
||||
|
||||
// Case 1: tx indexed with target pubkey
|
||||
tx1 := &StoreTx{ID: 1}
|
||||
store.addToResolvedPubkeyIndex(1, []string{target})
|
||||
if !store.nodeInResolvedPathViaIndex(tx1, target) {
|
||||
t.Error("should match when index contains target")
|
||||
}
|
||||
|
||||
// Case 2: tx indexed with different pubkey
|
||||
tx2 := &StoreTx{ID: 2}
|
||||
store.addToResolvedPubkeyIndex(2, []string{"aacafe0000000000"})
|
||||
if store.nodeInResolvedPathViaIndex(tx2, target) {
|
||||
t.Error("should not match when index contains different node")
|
||||
}
|
||||
|
||||
// Case 3: tx not in index at all — should match (no data to disambiguate)
|
||||
tx3 := &StoreTx{ID: 3}
|
||||
if !store.nodeInResolvedPathViaIndex(tx3, target) {
|
||||
t.Error("should match when tx has no index entries (no data to disambiguate)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPathHopIndexIncrementalUpdate(t *testing.T) {
|
||||
// After #800, addTxToPathHopIndex only indexes raw hops (not resolved pubkeys).
|
||||
// Resolved pubkeys are handled by the resolved pubkey membership index.
|
||||
// Test that addTxToPathHopIndex and removeTxFromPathHopIndex work correctly
|
||||
idx := make(map[string][]*StoreTx)
|
||||
|
||||
pk1 := "fullpubkey1"
|
||||
tx1 := &StoreTx{
|
||||
ID: 1,
|
||||
PathJSON: `["ab","cd"]`,
|
||||
ResolvedPath: []*string{&pk1, nil},
|
||||
}
|
||||
|
||||
addTxToPathHopIndex(idx, tx1)
|
||||
|
||||
// Should be indexed under "ab" and "cd" only (no resolved pubkey)
|
||||
// Should be indexed under "ab", "cd", and "fullpubkey1"
|
||||
if len(idx["ab"]) != 1 {
|
||||
t.Errorf("expected 1 entry for 'ab', got %d", len(idx["ab"]))
|
||||
}
|
||||
if len(idx["cd"]) != 1 {
|
||||
t.Errorf("expected 1 entry for 'cd', got %d", len(idx["cd"]))
|
||||
}
|
||||
if len(idx["fullpubkey1"]) != 1 {
|
||||
t.Errorf("expected 1 entry for resolved pubkey, got %d", len(idx["fullpubkey1"]))
|
||||
}
|
||||
|
||||
// Add another tx with overlapping hop
|
||||
tx2 := &StoreTx{
|
||||
@@ -3754,6 +3579,9 @@ func TestPathHopIndexIncrementalUpdate(t *testing.T) {
|
||||
if _, ok := idx["cd"]; ok {
|
||||
t.Error("expected 'cd' key to be deleted after removal")
|
||||
}
|
||||
if _, ok := idx["fullpubkey1"]; ok {
|
||||
t.Error("expected resolved pubkey key to be deleted after removal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricsAPIEndpoints(t *testing.T) {
|
||||
@@ -3793,182 +3621,3 @@ func TestMetricsAPIEndpoints(t *testing.T) {
|
||||
t.Errorf("expected 1 observer in summary, got %v", resp2["observers"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestNodeHealth_RecentPackets_ResolvedPath verifies that recentPackets in the
|
||||
// node health endpoint include resolved_path (regression for Codex review item #2).
|
||||
func TestNodeHealth_RecentPackets_ResolvedPath(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/health", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
|
||||
}
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatalf("json decode: %v", err)
|
||||
}
|
||||
rp, ok := body["recentPackets"].([]interface{})
|
||||
if !ok || len(rp) == 0 {
|
||||
t.Fatal("expected non-empty recentPackets")
|
||||
}
|
||||
// At least one packet should have resolved_path (tx 1 has observations with resolved_path)
|
||||
found := false
|
||||
for _, p := range rp {
|
||||
pm, ok := p.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if pm["resolved_path"] != nil {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("expected at least one recentPacket with resolved_path")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPacketsExpand_ResolvedPath verifies that expandObservations=true includes
|
||||
// resolved_path on expanded observations (regression for Codex review item #3).
|
||||
func TestPacketsExpand_ResolvedPath(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/packets?expand=observations&limit=10", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
|
||||
}
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatalf("json decode: %v", err)
|
||||
}
|
||||
packets, ok := body["packets"].([]interface{})
|
||||
if !ok || len(packets) == 0 {
|
||||
t.Fatal("expected non-empty packets")
|
||||
}
|
||||
// Find a packet with observations that should have resolved_path
|
||||
found := false
|
||||
for _, p := range packets {
|
||||
pm, ok := p.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
obs, ok := pm["observations"].([]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, o := range obs {
|
||||
om, ok := o.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if om["resolved_path"] != nil {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("expected at least one expanded observation with resolved_path")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPacketDetailFallsBackToDBWhenStoreMisses verifies that handlePacketDetail
|
||||
// serves transmissions present in the DB but absent from the in-memory store.
|
||||
// This is the recentAdverts → "Not found" bug (#827).
|
||||
func TestPacketDetailFallsBackToDBWhenStoreMisses(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
// Insert a transmission directly into the DB AFTER store.Load(), so the
|
||||
// in-memory PacketStore won't see it. Mirrors the production case where
|
||||
// the store has pruned an entry but the DB still has it.
|
||||
const dbOnlyHash = "deadbeef00112233"
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
if _, err := srv.db.conn.Exec(`INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
|
||||
VALUES ('FFEE', ?, ?, 1, 4, '{"type":"ADVERT"}')`, dbOnlyHash, now); err != nil {
|
||||
t.Fatalf("insert: %v", err)
|
||||
}
|
||||
var txID int
|
||||
if err := srv.db.conn.QueryRow("SELECT id FROM transmissions WHERE hash = ?", dbOnlyHash).Scan(&txID); err != nil {
|
||||
t.Fatalf("lookup tx id: %v", err)
|
||||
}
|
||||
if _, err := srv.db.conn.Exec(`INSERT INTO observations
|
||||
(transmission_id, observer_idx, snr, rssi, path_json, timestamp)
|
||||
VALUES (?, 1, 7.5, -99, '[]', ?)`, txID, time.Now().Unix()); err != nil {
|
||||
t.Fatalf("insert obs: %v", err)
|
||||
}
|
||||
|
||||
// Confirm the store really doesn't have it (precondition for the fix).
|
||||
if got := srv.store.GetPacketByHash(dbOnlyHash); got != nil {
|
||||
t.Fatalf("test precondition failed: store unexpectedly has %s", dbOnlyHash)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/packets/"+dbOnlyHash, nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
|
||||
}
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pkt, ok := body["packet"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("expected packet object")
|
||||
}
|
||||
if pkt["hash"] != dbOnlyHash {
|
||||
t.Errorf("expected hash %s, got %v", dbOnlyHash, pkt["hash"])
|
||||
}
|
||||
// Observations fallback should populate from DB too.
|
||||
obs, _ := body["observations"].([]interface{})
|
||||
if len(obs) == 0 {
|
||||
t.Errorf("expected DB observations to be returned, got 0")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPacketDetail404WhenAbsentFromBoth verifies that a hash present in
|
||||
// neither store nor DB still returns 404 (no false positives from the fallback).
|
||||
func TestPacketDetail404WhenAbsentFromBoth(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/packets/0011223344556677", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 404 {
|
||||
t.Errorf("expected 404, got %d (body: %s)", w.Code, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
// TestPacketDetailPrefersStoreOverDB verifies the store result wins when the
|
||||
// hash exists in both — the DB fallback must not double-fetch / overwrite.
|
||||
func TestPacketDetailPrefersStoreOverDB(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
// abc123def4567890 is seeded in both DB and (after Load) the store.
|
||||
const hash = "abc123def4567890"
|
||||
if got := srv.store.GetPacketByHash(hash); got == nil {
|
||||
t.Fatalf("test precondition failed: store should have %s", hash)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/packets/"+hash, nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &body)
|
||||
pkt, _ := body["packet"].(map[string]interface{})
|
||||
if pkt == nil || pkt["hash"] != hash {
|
||||
t.Fatalf("expected packet with hash %s, got %v", hash, pkt)
|
||||
}
|
||||
// observation_count comes from store observations (2 seeded for tx 1).
|
||||
if cnt, _ := body["observation_count"].(float64); cnt != 2 {
|
||||
t.Errorf("expected observation_count=2 (from store), got %v", body["observation_count"])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestStatsMemoryFields verifies that /api/stats exposes the new memory
|
||||
// breakdown introduced for issue #832: storeDataMB, processRSSMB,
|
||||
// goHeapInuseMB, goSysMB, plus the deprecated trackedMB alias.
|
||||
//
|
||||
// We assert presence, type, sign, and ordering invariants — but NOT
|
||||
// "RSS within X% of true RSS" because that is flaky in CI under cgo,
|
||||
// containerization, and shared-runner load.
|
||||
func TestStatsMemoryFields(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/stats", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var body map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
|
||||
t.Fatalf("json decode: %v", err)
|
||||
}
|
||||
|
||||
required := []string{"trackedMB", "storeDataMB", "processRSSMB", "goHeapInuseMB", "goSysMB"}
|
||||
values := make(map[string]float64, len(required))
|
||||
for _, k := range required {
|
||||
v, ok := body[k]
|
||||
if !ok {
|
||||
t.Fatalf("missing field %q in /api/stats response", k)
|
||||
}
|
||||
f, ok := v.(float64)
|
||||
if !ok {
|
||||
t.Fatalf("field %q is %T, expected float64", k, v)
|
||||
}
|
||||
if f < 0 {
|
||||
t.Errorf("field %q is negative: %v", k, f)
|
||||
}
|
||||
values[k] = f
|
||||
}
|
||||
|
||||
// trackedMB is a deprecated alias for storeDataMB; they must match.
|
||||
if values["trackedMB"] != values["storeDataMB"] {
|
||||
t.Errorf("trackedMB (%v) != storeDataMB (%v); they must remain aliased",
|
||||
values["trackedMB"], values["storeDataMB"])
|
||||
}
|
||||
|
||||
// Ordering invariants. goSys is the runtime's view of total OS memory;
|
||||
// HeapInuse is a subset of it. storeData is a subset of HeapInuse.
|
||||
// processRSS may be 0 in environments without /proc — treat 0 as
|
||||
// "unknown" rather than a failure.
|
||||
if values["goHeapInuseMB"] > values["goSysMB"]+0.5 {
|
||||
t.Errorf("invariant violated: goHeapInuseMB (%v) > goSysMB (%v)",
|
||||
values["goHeapInuseMB"], values["goSysMB"])
|
||||
}
|
||||
if values["storeDataMB"] > values["goHeapInuseMB"]+0.5 && values["storeDataMB"] > 0 {
|
||||
// In the test fixture storeDataMB is typically 0 (no packets in
|
||||
// store); only enforce the bound when both are nonzero.
|
||||
t.Errorf("invariant violated: storeDataMB (%v) > goHeapInuseMB (%v)",
|
||||
values["storeDataMB"], values["goHeapInuseMB"])
|
||||
}
|
||||
if values["processRSSMB"] > 0 && values["goSysMB"] > 0 {
|
||||
// goSys can briefly exceed RSS if pages are reserved-but-not-touched,
|
||||
// so allow some slack.
|
||||
if values["goSysMB"] > values["processRSSMB"]*4 {
|
||||
t.Errorf("suspicious: goSysMB (%v) >> processRSSMB (%v)",
|
||||
values["goSysMB"], values["processRSSMB"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestStatsMemoryFieldsRawJSON spot-checks that the JSON wire format uses
|
||||
// the documented camelCase names (no accidental rename through struct tags).
|
||||
func TestStatsMemoryFieldsRawJSON(t *testing.T) {
|
||||
_, router := setupTestServer(t)
|
||||
req := httptest.NewRequest("GET", "/api/stats", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
body := w.Body.String()
|
||||
for _, key := range []string{
|
||||
`"trackedMB":`, `"storeDataMB":`,
|
||||
`"processRSSMB":`, `"goHeapInuseMB":`, `"goSysMB":`,
|
||||
} {
|
||||
if !strings.Contains(body, key) {
|
||||
t.Errorf("missing %s in raw response: %s", key, body)
|
||||
}
|
||||
}
|
||||
}
|
||||
+196
-1167
File diff suppressed because it is too large
Load Diff
@@ -1,116 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func f64(v float64) *float64 { return &v }
|
||||
|
||||
func TestDedupeTopHopsByPair(t *testing.T) {
|
||||
hops := []distHopRecord{
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 100, Type: "R↔R", SNR: f64(5.0), Hash: "h1", Timestamp: "t1"},
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 90, Type: "R↔R", SNR: f64(8.0), Hash: "h2", Timestamp: "t2"},
|
||||
{FromPk: "BBB", ToPk: "AAA", FromName: "B", ToName: "A", Dist: 80, Type: "R↔R", SNR: f64(3.0), Hash: "h3", Timestamp: "t3"},
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 70, Type: "R↔R", SNR: f64(6.0), Hash: "h4", Timestamp: "t4"},
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 60, Type: "R↔R", SNR: f64(4.0), Hash: "h5", Timestamp: "t5"},
|
||||
{FromPk: "CCC", ToPk: "DDD", FromName: "C", ToName: "D", Dist: 50, Type: "C↔R", SNR: f64(7.0), Hash: "h6", Timestamp: "t6"},
|
||||
}
|
||||
|
||||
result := dedupeHopsByPair(hops, 20)
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Fatalf("expected 2 entries, got %d", len(result))
|
||||
}
|
||||
|
||||
// First entry: A↔B pair, max distance = 100, obsCount = 5
|
||||
ab := result[0]
|
||||
if ab["dist"].(float64) != 100 {
|
||||
t.Errorf("expected dist 100, got %v", ab["dist"])
|
||||
}
|
||||
if ab["obsCount"].(int) != 5 {
|
||||
t.Errorf("expected obsCount 5, got %v", ab["obsCount"])
|
||||
}
|
||||
if ab["hash"].(string) != "h1" {
|
||||
t.Errorf("expected hash h1 (from max-dist record), got %v", ab["hash"])
|
||||
}
|
||||
if ab["bestSnr"].(float64) != 8.0 {
|
||||
t.Errorf("expected bestSnr 8.0, got %v", ab["bestSnr"])
|
||||
}
|
||||
// medianSnr of [3,4,5,6,8] = 5.0
|
||||
if ab["medianSnr"].(float64) != 5.0 {
|
||||
t.Errorf("expected medianSnr 5.0, got %v", ab["medianSnr"])
|
||||
}
|
||||
|
||||
// Second entry: C↔D pair
|
||||
cd := result[1]
|
||||
if cd["dist"].(float64) != 50 {
|
||||
t.Errorf("expected dist 50, got %v", cd["dist"])
|
||||
}
|
||||
if cd["obsCount"].(int) != 1 {
|
||||
t.Errorf("expected obsCount 1, got %v", cd["obsCount"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDedupeTopHopsReversePairMerges(t *testing.T) {
|
||||
hops := []distHopRecord{
|
||||
{FromPk: "BBB", ToPk: "AAA", FromName: "B", ToName: "A", Dist: 50, Type: "R↔R", Hash: "h1"},
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 80, Type: "R↔R", Hash: "h2"},
|
||||
}
|
||||
result := dedupeHopsByPair(hops, 20)
|
||||
if len(result) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(result))
|
||||
}
|
||||
if result[0]["obsCount"].(int) != 2 {
|
||||
t.Errorf("expected obsCount 2, got %v", result[0]["obsCount"])
|
||||
}
|
||||
if result[0]["dist"].(float64) != 80 {
|
||||
t.Errorf("expected dist 80, got %v", result[0]["dist"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDedupeTopHopsNilSNR(t *testing.T) {
|
||||
hops := []distHopRecord{
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 100, Type: "R↔R", SNR: nil, Hash: "h1"},
|
||||
{FromPk: "AAA", ToPk: "BBB", FromName: "A", ToName: "B", Dist: 90, Type: "R↔R", SNR: nil, Hash: "h2"},
|
||||
}
|
||||
result := dedupeHopsByPair(hops, 20)
|
||||
if len(result) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(result))
|
||||
}
|
||||
if result[0]["bestSnr"] != nil {
|
||||
t.Errorf("expected bestSnr nil, got %v", result[0]["bestSnr"])
|
||||
}
|
||||
if result[0]["medianSnr"] != nil {
|
||||
t.Errorf("expected medianSnr nil, got %v", result[0]["medianSnr"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDedupeTopHopsLimit(t *testing.T) {
|
||||
// Generate 25 unique pairs, verify limit=20 caps output
|
||||
hops := make([]distHopRecord, 25)
|
||||
for i := range hops {
|
||||
hops[i] = distHopRecord{
|
||||
FromPk: "A", ToPk: string(rune('a' + i)),
|
||||
Dist: float64(i), Type: "R↔R", Hash: "h",
|
||||
}
|
||||
}
|
||||
result := dedupeHopsByPair(hops, 20)
|
||||
if len(result) != 20 {
|
||||
t.Errorf("expected 20 entries, got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDedupeTopHopsEvenMedian(t *testing.T) {
|
||||
// Even count: median = avg of two middle values
|
||||
hops := []distHopRecord{
|
||||
{FromPk: "A", ToPk: "B", Dist: 10, Type: "R↔R", SNR: f64(2.0), Hash: "h1"},
|
||||
{FromPk: "A", ToPk: "B", Dist: 20, Type: "R↔R", SNR: f64(4.0), Hash: "h2"},
|
||||
{FromPk: "A", ToPk: "B", Dist: 30, Type: "R↔R", SNR: f64(6.0), Hash: "h3"},
|
||||
{FromPk: "A", ToPk: "B", Dist: 40, Type: "R↔R", SNR: f64(8.0), Hash: "h4"},
|
||||
}
|
||||
result := dedupeHopsByPair(hops, 20)
|
||||
// sorted SNR: [2,4,6,8], median = (4+6)/2 = 5.0
|
||||
if result[0]["medianSnr"].(float64) != 5.0 {
|
||||
t.Errorf("expected medianSnr 5.0, got %v", result[0]["medianSnr"])
|
||||
}
|
||||
}
|
||||
+4
-13
@@ -42,20 +42,14 @@
|
||||
"type": {
|
||||
"type": "string"
|
||||
},
|
||||
"snr": {
|
||||
"type": "number"
|
||||
},
|
||||
"hash": {
|
||||
"type": "string"
|
||||
},
|
||||
"timestamp": {
|
||||
"type": "string"
|
||||
},
|
||||
"bestSnr": {
|
||||
"type": "number"
|
||||
},
|
||||
"medianSnr": {
|
||||
"type": "number"
|
||||
},
|
||||
"obsCount": {
|
||||
"type": "number"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -922,9 +916,6 @@
|
||||
},
|
||||
"estimatedMB": {
|
||||
"type": "number"
|
||||
},
|
||||
"trackedMB": {
|
||||
"type": "number"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1586,4 +1577,4 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,126 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
func TestTouchNodeLastSeen_UpdatesDB(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
// Insert a node with no last_seen
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role) VALUES (?, ?, ?)", "abc123", "relay1", "REPEATER")
|
||||
|
||||
err := db.TouchNodeLastSeen("abc123", "2026-04-12T04:00:00Z")
|
||||
if err != nil {
|
||||
t.Fatalf("TouchNodeLastSeen returned error: %v", err)
|
||||
}
|
||||
|
||||
var lastSeen sql.NullString
|
||||
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "abc123").Scan(&lastSeen)
|
||||
if !lastSeen.Valid || lastSeen.String != "2026-04-12T04:00:00Z" {
|
||||
t.Fatalf("expected last_seen=2026-04-12T04:00:00Z, got %v", lastSeen)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTouchNodeLastSeen_DoesNotGoBackwards(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
|
||||
"abc123", "relay1", "REPEATER", "2026-04-12T05:00:00Z")
|
||||
|
||||
// Try to set an older timestamp
|
||||
err := db.TouchNodeLastSeen("abc123", "2026-04-12T04:00:00Z")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var lastSeen string
|
||||
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "abc123").Scan(&lastSeen)
|
||||
if lastSeen != "2026-04-12T05:00:00Z" {
|
||||
t.Fatalf("last_seen went backwards: got %s", lastSeen)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTouchNodeLastSeen_NonExistentNode(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
// Should not error for non-existent node
|
||||
err := db.TouchNodeLastSeen("nonexistent", "2026-04-12T04:00:00Z")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error for non-existent node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTouchRelayLastSeen_Debouncing(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
db.conn.Exec("INSERT INTO nodes (public_key, name, role) VALUES (?, ?, ?)", "relay1", "R1", "REPEATER")
|
||||
|
||||
s := &PacketStore{
|
||||
db: db,
|
||||
lastSeenTouched: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
// After #800, touchRelayLastSeen takes a []string of pubkeys (from decode-window)
|
||||
pks := []string{"relay1"}
|
||||
|
||||
now := time.Now()
|
||||
s.touchRelayLastSeen(pks, now)
|
||||
|
||||
// Verify it was written
|
||||
var lastSeen sql.NullString
|
||||
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "relay1").Scan(&lastSeen)
|
||||
if !lastSeen.Valid {
|
||||
t.Fatal("expected last_seen to be set after first touch")
|
||||
}
|
||||
|
||||
// Reset last_seen to check debounce prevents second write
|
||||
db.conn.Exec("UPDATE nodes SET last_seen = NULL WHERE public_key = ?", "relay1")
|
||||
|
||||
// Call again within 5 minutes — should be debounced (no write)
|
||||
s.touchRelayLastSeen(pks, now.Add(2*time.Minute))
|
||||
|
||||
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "relay1").Scan(&lastSeen)
|
||||
if lastSeen.Valid {
|
||||
t.Fatal("expected debounce to prevent second write within 5 minutes")
|
||||
}
|
||||
|
||||
// Call after 5 minutes — should write again
|
||||
s.touchRelayLastSeen(pks, now.Add(6*time.Minute))
|
||||
db.conn.QueryRow("SELECT last_seen FROM nodes WHERE public_key = ?", "relay1").Scan(&lastSeen)
|
||||
if !lastSeen.Valid {
|
||||
t.Fatal("expected write after debounce interval expired")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTouchRelayLastSeen_SkipsEmptyPubkeys(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
s := &PacketStore{
|
||||
db: db,
|
||||
lastSeenTouched: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
// Empty pubkeys — should not panic or error
|
||||
s.touchRelayLastSeen([]string{}, time.Now())
|
||||
s.touchRelayLastSeen(nil, time.Now())
|
||||
}
|
||||
|
||||
func TestTouchRelayLastSeen_NilDB(t *testing.T) {
|
||||
s := &PacketStore{
|
||||
db: nil,
|
||||
lastSeenTouched: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
// Should not panic with nil db
|
||||
s.touchRelayLastSeen([]string{"abc"}, time.Now())
|
||||
}
|
||||
@@ -1,166 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestEstimateStoreTxBytes_ReasonableValues verifies the estimate function
|
||||
// returns reasonable values for different packet sizes.
|
||||
func TestEstimateStoreTxBytes_ReasonableValues(t *testing.T) {
|
||||
tx := &StoreTx{
|
||||
Hash: "abcdef1234567890",
|
||||
RawHex: "deadbeef",
|
||||
DecodedJSON: `{"type":"GRP_TXT"}`,
|
||||
PathJSON: `["hop1","hop2","hop3"]`,
|
||||
parsedPath: []string{"hop1", "hop2", "hop3"},
|
||||
pathParsed: true,
|
||||
}
|
||||
got := estimateStoreTxBytes(tx)
|
||||
|
||||
// Should be at least base (384) + maps (200) + indexes + path/subpath costs
|
||||
if got < 700 {
|
||||
t.Errorf("estimate too low for 3-hop tx: %d", got)
|
||||
}
|
||||
if got > 5000 {
|
||||
t.Errorf("estimate unreasonably high for 3-hop tx: %d", got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEstimateStoreTxBytes_ManyHopsSubpaths verifies that packets with many
|
||||
// hops estimate significantly more due to O(path²) subpath index entries.
|
||||
func TestEstimateStoreTxBytes_ManyHopsSubpaths(t *testing.T) {
|
||||
tx2 := &StoreTx{
|
||||
Hash: "aabb",
|
||||
parsedPath: []string{"a", "b"},
|
||||
pathParsed: true,
|
||||
}
|
||||
tx10 := &StoreTx{
|
||||
Hash: "aabb",
|
||||
parsedPath: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"},
|
||||
pathParsed: true,
|
||||
}
|
||||
est2 := estimateStoreTxBytes(tx2)
|
||||
est10 := estimateStoreTxBytes(tx10)
|
||||
|
||||
// 10 hops → 45 subpath combos × 40 = 1800 bytes just for subpaths
|
||||
if est10 <= est2 {
|
||||
t.Errorf("10-hop (%d) should estimate more than 2-hop (%d)", est10, est2)
|
||||
}
|
||||
if est10 < est2+1500 {
|
||||
t.Errorf("10-hop (%d) should estimate at least 1500 more than 2-hop (%d)", est10, est2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEstimateStoreObsBytes_AfterRefactor verifies that after #800 refactor,
|
||||
// observations no longer have ResolvedPath overhead in their estimate.
|
||||
func TestEstimateStoreObsBytes_AfterRefactor(t *testing.T) {
|
||||
obs := &StoreObs{
|
||||
ObserverID: "obs1",
|
||||
PathJSON: `["a","b"]`,
|
||||
}
|
||||
|
||||
est := estimateStoreObsBytes(obs)
|
||||
if est <= 0 {
|
||||
t.Errorf("estimate should be positive, got %d", est)
|
||||
}
|
||||
// After #800, all obs estimates should be the same (no RP field variation)
|
||||
obs2 := &StoreObs{
|
||||
ObserverID: "obs1",
|
||||
PathJSON: `["a","b"]`,
|
||||
}
|
||||
est2 := estimateStoreObsBytes(obs2)
|
||||
if est != est2 {
|
||||
t.Errorf("estimates should be equal after #800 (no RP field), got %d vs %d", est, est2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEstimateStoreObsBytes_ManyObservations verifies that 15 observations
|
||||
// estimate significantly more than 1.
|
||||
func TestEstimateStoreObsBytes_ManyObservations(t *testing.T) {
|
||||
est1 := estimateStoreObsBytes(&StoreObs{ObserverID: "a", PathJSON: `["x"]`})
|
||||
est15 := int64(0)
|
||||
for i := 0; i < 15; i++ {
|
||||
est15 += estimateStoreObsBytes(&StoreObs{ObserverID: "a", PathJSON: `["x"]`})
|
||||
}
|
||||
if est15 <= est1*10 {
|
||||
t.Errorf("15 obs total (%d) should be >10x single obs (%d)", est15, est1)
|
||||
}
|
||||
}
|
||||
|
||||
// TestTrackedBytesMatchesSumAfterInsert verifies that trackedBytes equals the
|
||||
// sum of individual estimates after inserting packets via makeTestStore.
|
||||
func TestTrackedBytesMatchesSumAfterInsert(t *testing.T) {
|
||||
store := makeTestStore(20, time.Now().Add(-2*time.Hour), 5)
|
||||
|
||||
// Manually compute trackedBytes as sum of estimates
|
||||
var expectedSum int64
|
||||
for _, tx := range store.packets {
|
||||
expectedSum += estimateStoreTxBytes(tx)
|
||||
for _, obs := range tx.Observations {
|
||||
expectedSum += estimateStoreObsBytes(obs)
|
||||
}
|
||||
}
|
||||
|
||||
if store.trackedBytes != expectedSum {
|
||||
t.Errorf("trackedBytes=%d, expected sum=%d", store.trackedBytes, expectedSum)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEvictionTriggersWithImprovedEstimates verifies that eviction triggers
|
||||
// at the right point with the improved (higher) estimates.
|
||||
func TestEvictionTriggersWithImprovedEstimates(t *testing.T) {
|
||||
store := makeTestStore(100, time.Now().Add(-10*time.Hour), 5)
|
||||
|
||||
// trackedBytes for 100 packets is small — artificially set maxMemoryMB
|
||||
// so highWatermark is just below trackedBytes to trigger eviction.
|
||||
highWatermarkBytes := store.trackedBytes - 1000
|
||||
if highWatermarkBytes < 1 {
|
||||
highWatermarkBytes = 1
|
||||
}
|
||||
// maxMemoryMB * 1048576 = highWatermark, so maxMemoryMB = ceil(highWatermarkBytes / 1048576)
|
||||
// But that'll be 0 for small values. Instead, directly set trackedBytes high.
|
||||
store.trackedBytes = 6 * 1048576 // 6MB
|
||||
store.maxMemoryMB = 3 // 3MB limit
|
||||
|
||||
beforeCount := len(store.packets)
|
||||
store.RunEviction()
|
||||
afterCount := len(store.packets)
|
||||
|
||||
if afterCount >= beforeCount {
|
||||
t.Errorf("expected eviction to remove packets: before=%d, after=%d, trackedBytes=%d, maxMB=%d",
|
||||
beforeCount, afterCount, store.trackedBytes, store.maxMemoryMB)
|
||||
}
|
||||
// trackedBytes should have decreased
|
||||
if store.trackedBytes >= 6*1048576 {
|
||||
t.Errorf("trackedBytes should have decreased after eviction")
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkEstimateStoreTxBytes verifies the estimate function is fast.
|
||||
func BenchmarkEstimateStoreTxBytes(b *testing.B) {
|
||||
tx := &StoreTx{
|
||||
Hash: "abcdef1234567890",
|
||||
RawHex: "deadbeefdeadbeef",
|
||||
DecodedJSON: `{"type":"GRP_TXT","payload":"hello"}`,
|
||||
PathJSON: `["hop1","hop2","hop3","hop4","hop5"]`,
|
||||
parsedPath: []string{"hop1", "hop2", "hop3", "hop4", "hop5"},
|
||||
pathParsed: true,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
estimateStoreTxBytes(tx)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkEstimateStoreObsBytes verifies the obs estimate function is fast.
|
||||
func BenchmarkEstimateStoreObsBytes(b *testing.B) {
|
||||
obs := &StoreObs{
|
||||
ObserverID: "observer1234",
|
||||
PathJSON: `["a","b","c"]`,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
estimateStoreObsBytes(obs)
|
||||
}
|
||||
}
|
||||
+6
-30
@@ -68,26 +68,8 @@ type StatsResponse struct {
|
||||
Commit string `json:"commit"`
|
||||
BuildTime string `json:"buildTime"`
|
||||
Counts RoleCounts `json:"counts"`
|
||||
Backfilling bool `json:"backfilling"`
|
||||
BackfillProgress float64 `json:"backfillProgress"`
|
||||
SignatureDrops int64 `json:"signatureDrops,omitempty"`
|
||||
HashMigrationComplete bool `json:"hashMigrationComplete"`
|
||||
|
||||
// Memory accounting (issue #832). All values in MB.
|
||||
//
|
||||
// StoreDataMB ("trackedMB" historically) is the in-store packet byte
|
||||
// estimate — useful packet bytes only. Subset of HeapInuse. Used as
|
||||
// the eviction watermark input. NOT a proxy for RSS; ops dashboards
|
||||
// should prefer ProcessRSSMB for capacity decisions.
|
||||
//
|
||||
// Old field name TrackedMB is retained for backward compatibility
|
||||
// with pre-v3.6 consumers; it carries the same value as StoreDataMB
|
||||
// and is deprecated.
|
||||
TrackedMB float64 `json:"trackedMB"` // deprecated alias for storeDataMB
|
||||
StoreDataMB float64 `json:"storeDataMB"` // in-store packet bytes (subset of heap)
|
||||
ProcessRSSMB float64 `json:"processRSSMB"` // process RSS from /proc (Linux) or runtime.Sys fallback
|
||||
GoHeapInuseMB float64 `json:"goHeapInuseMB"` // runtime.MemStats.HeapInuse
|
||||
GoSysMB float64 `json:"goSysMB"` // runtime.MemStats.Sys (total Go-managed)
|
||||
Backfilling bool `json:"backfilling"`
|
||||
BackfillProgress float64 `json:"backfillProgress"`
|
||||
}
|
||||
|
||||
// ─── Health ────────────────────────────────────────────────────────────────────
|
||||
@@ -133,7 +115,6 @@ type WebSocketStatsResp struct {
|
||||
type HealthPacketStoreStats struct {
|
||||
Packets int `json:"packets"`
|
||||
EstimatedMB float64 `json:"estimatedMB"`
|
||||
TrackedMB float64 `json:"trackedMB"`
|
||||
}
|
||||
|
||||
type SlowQuery struct {
|
||||
@@ -193,8 +174,6 @@ type PerfPacketStoreStats struct {
|
||||
SqliteOnly bool `json:"sqliteOnly"`
|
||||
MaxPackets int `json:"maxPackets"`
|
||||
EstimatedMB float64 `json:"estimatedMB"`
|
||||
TrackedMB float64 `json:"trackedMB"`
|
||||
AvgBytesPerPacket int64 `json:"avgBytesPerPacket"`
|
||||
MaxMB int `json:"maxMB"`
|
||||
Indexes PacketStoreIndexes `json:"indexes"`
|
||||
}
|
||||
@@ -263,6 +242,7 @@ type TransmissionResp struct {
|
||||
SNR interface{} `json:"snr"`
|
||||
RSSI interface{} `json:"rssi"`
|
||||
PathJSON interface{} `json:"path_json"`
|
||||
ResolvedPath []*string `json:"resolved_path,omitempty"`
|
||||
Direction interface{} `json:"direction"`
|
||||
Score interface{} `json:"score,omitempty"`
|
||||
Observations []ObservationResp `json:"observations,omitempty"`
|
||||
@@ -277,9 +257,7 @@ type ObservationResp struct {
|
||||
SNR interface{} `json:"snr"`
|
||||
RSSI interface{} `json:"rssi"`
|
||||
PathJSON interface{} `json:"path_json"`
|
||||
ResolvedPath interface{} `json:"resolved_path,omitempty"`
|
||||
Direction interface{} `json:"direction,omitempty"`
|
||||
RawHex interface{} `json:"raw_hex,omitempty"`
|
||||
ResolvedPath []*string `json:"resolved_path,omitempty"`
|
||||
Timestamp interface{} `json:"timestamp"`
|
||||
}
|
||||
|
||||
@@ -315,6 +293,7 @@ type PacketTimestampsResponse struct {
|
||||
type PacketDetailResponse struct {
|
||||
Packet interface{} `json:"packet"`
|
||||
Path []interface{} `json:"path"`
|
||||
Breakdown *Breakdown `json:"breakdown"`
|
||||
ObservationCount int `json:"observation_count"`
|
||||
Observations []ObservationResp `json:"observations,omitempty"`
|
||||
}
|
||||
@@ -487,7 +466,6 @@ type NodeAnalyticsResponse struct {
|
||||
PeerInteractions []PeerInteraction `json:"peerInteractions"`
|
||||
UptimeHeatmap []HeatmapCell `json:"uptimeHeatmap"`
|
||||
ComputedStats ComputedNodeStats `json:"computedStats"`
|
||||
ClockSkew *NodeClockSkew `json:"clockSkew,omitempty"`
|
||||
}
|
||||
|
||||
// ─── Analytics — RF ────────────────────────────────────────────────────────────
|
||||
@@ -680,9 +658,7 @@ type DistanceHop struct {
|
||||
ToPk string `json:"toPk"`
|
||||
Dist float64 `json:"dist"`
|
||||
Type string `json:"type"`
|
||||
BestSnr interface{} `json:"bestSnr"`
|
||||
MedianSnr interface{} `json:"medianSnr"`
|
||||
ObsCount int `json:"obsCount"`
|
||||
SNR interface{} `json:"snr"`
|
||||
Hash string `json:"hash"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
}
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// checkAutoVacuum inspects the current auto_vacuum mode and logs a warning
|
||||
// if it's not INCREMENTAL. Optionally performs a one-time full VACUUM if
|
||||
// the operator has set db.vacuumOnStartup: true in config (#919).
|
||||
func checkAutoVacuum(db *DB, cfg *Config, dbPath string) {
|
||||
var autoVacuum int
|
||||
if err := db.conn.QueryRow("PRAGMA auto_vacuum").Scan(&autoVacuum); err != nil {
|
||||
log.Printf("[db] warning: could not read auto_vacuum: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if autoVacuum == 2 {
|
||||
log.Printf("[db] auto_vacuum=INCREMENTAL")
|
||||
return
|
||||
}
|
||||
|
||||
modes := map[int]string{0: "NONE", 1: "FULL", 2: "INCREMENTAL"}
|
||||
mode := modes[autoVacuum]
|
||||
if mode == "" {
|
||||
mode = fmt.Sprintf("UNKNOWN(%d)", autoVacuum)
|
||||
}
|
||||
|
||||
log.Printf("[db] auto_vacuum=%s — DB needs one-time VACUUM to enable incremental auto-vacuum. "+
|
||||
"Set db.vacuumOnStartup: true in config to migrate (will block startup for several minutes on large DBs). "+
|
||||
"See https://github.com/Kpa-clawbot/CoreScope/issues/919", mode)
|
||||
|
||||
if cfg.DB != nil && cfg.DB.VacuumOnStartup {
|
||||
// WARNING: Full VACUUM creates a temporary copy of the entire DB file.
|
||||
// Requires ~2× the DB file size in free disk space or it will fail.
|
||||
log.Printf("[db] vacuumOnStartup=true — starting one-time full VACUUM (ensure 2x DB size free disk space)...")
|
||||
start := time.Now()
|
||||
|
||||
rw, err := openRW(dbPath)
|
||||
if err != nil {
|
||||
log.Printf("[db] VACUUM failed: could not open RW connection: %v", err)
|
||||
return
|
||||
}
|
||||
defer rw.Close()
|
||||
|
||||
if _, err := rw.Exec("PRAGMA auto_vacuum = INCREMENTAL"); err != nil {
|
||||
log.Printf("[db] VACUUM failed: could not set auto_vacuum: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err := rw.Exec("VACUUM"); err != nil {
|
||||
log.Printf("[db] VACUUM failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("[db] VACUUM complete in %v — auto_vacuum is now INCREMENTAL", elapsed.Round(time.Millisecond))
|
||||
|
||||
// Re-check
|
||||
var newMode int
|
||||
if err := db.conn.QueryRow("PRAGMA auto_vacuum").Scan(&newMode); err == nil {
|
||||
if newMode == 2 {
|
||||
log.Printf("[db] auto_vacuum=INCREMENTAL (confirmed after VACUUM)")
|
||||
} else {
|
||||
log.Printf("[db] warning: auto_vacuum=%d after VACUUM — expected 2", newMode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runIncrementalVacuum runs PRAGMA incremental_vacuum(N) on a read-write
|
||||
// connection. Safe to call on auto_vacuum=NONE databases (noop).
|
||||
func runIncrementalVacuum(dbPath string, pages int) {
|
||||
rw, err := openRW(dbPath)
|
||||
if err != nil {
|
||||
log.Printf("[vacuum] could not open RW connection: %v", err)
|
||||
return
|
||||
}
|
||||
defer rw.Close()
|
||||
|
||||
if _, err := rw.Exec(fmt.Sprintf("PRAGMA incremental_vacuum(%d)", pages)); err != nil {
|
||||
log.Printf("[vacuum] incremental_vacuum error: %v", err)
|
||||
}
|
||||
}
|
||||
+3
-22
@@ -1,18 +1,10 @@
|
||||
{
|
||||
"port": 3000,
|
||||
"apiKey": "your-secret-api-key-here",
|
||||
"nodeBlacklist": [],
|
||||
"_comment_nodeBlacklist": "Public keys of nodes to hide from all API responses. Use for trolls, offensive names, or nodes reporting false data that operators refuse to fix.",
|
||||
"retention": {
|
||||
"nodeDays": 7,
|
||||
"observerDays": 14,
|
||||
"packetDays": 30,
|
||||
"_comment": "nodeDays: nodes not seen in N days moved to inactive_nodes (default 7). observerDays: observers not sending data in N days are removed (-1 = keep forever, default 14). packetDays: transmissions older than N days are deleted (0 = disabled)."
|
||||
},
|
||||
"db": {
|
||||
"vacuumOnStartup": false,
|
||||
"incrementalVacuumPages": 1024,
|
||||
"_comment": "vacuumOnStartup: run one-time full VACUUM to enable incremental auto-vacuum on existing DBs (blocks startup for minutes on large DBs; requires 2x DB file size in free disk space). incrementalVacuumPages: free pages returned to OS after each retention reaper cycle (default 1024). See #919."
|
||||
"_comment": "nodeDays: nodes not seen in N days are moved to inactive_nodes (default 7). packetDays: transmissions+observations older than N days are deleted daily (0 = disabled)."
|
||||
},
|
||||
"https": {
|
||||
"cert": "/path/to/cert.pem",
|
||||
@@ -133,7 +125,7 @@
|
||||
}
|
||||
],
|
||||
"channelKeys": {
|
||||
"Public": "8b3387e9c5cdea6ac9e5edbaa115cd72"
|
||||
"public": "8b3387e9c5cdea6ac9e5edbaa115cd72"
|
||||
},
|
||||
"hashChannels": [
|
||||
"#LongFast",
|
||||
@@ -161,16 +153,6 @@
|
||||
],
|
||||
"zoom": 9
|
||||
},
|
||||
"geo_filter": {
|
||||
"polygon": [
|
||||
[37.80, -122.52],
|
||||
[37.80, -121.80],
|
||||
[37.20, -121.80],
|
||||
[37.20, -122.52]
|
||||
],
|
||||
"bufferKm": 20,
|
||||
"_comment": "Optional. Restricts ingestion and API responses to nodes within the polygon + bufferKm. Polygon is an array of [lat, lon] pairs (minimum 3). Use tools/geofilter-builder.html to draw a polygon visually. Remove this section to disable filtering. Nodes with no GPS fix are always allowed through."
|
||||
},
|
||||
"regions": {
|
||||
"SJC": "San Jose, US",
|
||||
"SFO": "San Francisco, US",
|
||||
@@ -213,8 +195,7 @@
|
||||
"packetStore": {
|
||||
"maxMemoryMB": 1024,
|
||||
"estimatedPacketBytes": 450,
|
||||
"retentionHours": 168,
|
||||
"_comment": "In-memory packet store. maxMemoryMB caps RAM usage. retentionHours: only packets younger than this are loaded on startup and kept in memory (0 = unlimited, not recommended for large DBs — causes OOM on cold start). 168 = 7 days. Must be ≤ retention.packetDays * 24."
|
||||
"_comment": "In-memory packet store. maxMemoryMB caps RAM usage. All packets loaded on startup, served from RAM."
|
||||
},
|
||||
"resolvedPath": {
|
||||
"backfillHours": 24,
|
||||
|
||||
@@ -15,11 +15,15 @@ services:
|
||||
restart: unless-stopped
|
||||
stop_grace_period: 30s
|
||||
stop_signal: SIGTERM
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 3g
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
ports:
|
||||
- "${STAGING_GO_HTTP_PORT:-80}:80"
|
||||
- "${STAGING_GO_MQTT_PORT:-1883}:1883"
|
||||
- "${STAGING_GO_HTTP_PORT:-82}:80"
|
||||
- "${STAGING_GO_MQTT_PORT:-1885}:1883"
|
||||
- "6060:6060" # pprof server
|
||||
- "6061:6061" # pprof ingestor
|
||||
volumes:
|
||||
@@ -29,7 +33,6 @@ services:
|
||||
- NODE_ENV=staging
|
||||
- ENABLE_PPROF=true
|
||||
- DISABLE_MOSQUITTO=${DISABLE_MOSQUITTO:-false}
|
||||
- DISABLE_CADDY=${DISABLE_CADDY:-false}
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:3000/api/stats"]
|
||||
interval: 30s
|
||||
|
||||
@@ -29,7 +29,6 @@ services:
|
||||
environment:
|
||||
- NODE_ENV=production
|
||||
- DISABLE_MOSQUITTO=${DISABLE_MOSQUITTO:-false}
|
||||
- DISABLE_CADDY=${DISABLE_CADDY:-false}
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:3000/api/stats"]
|
||||
interval: 30s
|
||||
|
||||
+1
-15
@@ -14,24 +14,10 @@ if [ -f /app/data/theme.json ]; then
|
||||
ln -sf /app/data/theme.json /app/theme.json
|
||||
fi
|
||||
|
||||
# Source .env from data volume if present (works with any launch method)
|
||||
if [ -f /app/data/.env ]; then
|
||||
set -a
|
||||
. /app/data/.env
|
||||
set +a
|
||||
fi
|
||||
|
||||
SUPERVISORD_CONF="/etc/supervisor/conf.d/supervisord.conf"
|
||||
if [ "${DISABLE_MOSQUITTO:-false}" = "true" ] && [ "${DISABLE_CADDY:-false}" = "true" ]; then
|
||||
echo "[config] internal MQTT broker disabled (DISABLE_MOSQUITTO=true)"
|
||||
echo "[config] Caddy reverse proxy disabled (DISABLE_CADDY=true)"
|
||||
SUPERVISORD_CONF="/etc/supervisor/conf.d/supervisord-no-mosquitto-no-caddy.conf"
|
||||
elif [ "${DISABLE_MOSQUITTO:-false}" = "true" ]; then
|
||||
if [ "${DISABLE_MOSQUITTO:-false}" = "true" ]; then
|
||||
echo "[config] internal MQTT broker disabled (DISABLE_MOSQUITTO=true)"
|
||||
SUPERVISORD_CONF="/etc/supervisor/conf.d/supervisord-no-mosquitto.conf"
|
||||
elif [ "${DISABLE_CADDY:-false}" = "true" ]; then
|
||||
echo "[config] Caddy reverse proxy disabled (DISABLE_CADDY=true)"
|
||||
SUPERVISORD_CONF="/etc/supervisor/conf.d/supervisord-no-caddy.conf"
|
||||
fi
|
||||
|
||||
exec /usr/bin/supervisord -c "$SUPERVISORD_CONF"
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
user=root
|
||||
logfile=/dev/stdout
|
||||
logfile_maxbytes=0
|
||||
pidfile=/var/run/supervisord.pid
|
||||
|
||||
[program:mosquitto]
|
||||
command=/usr/sbin/mosquitto -c /etc/mosquitto/mosquitto.conf
|
||||
autostart=true
|
||||
autorestart=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:corescope-ingestor]
|
||||
command=/app/corescope-ingestor -config /app/config.json
|
||||
directory=/app
|
||||
autostart=true
|
||||
autorestart=true
|
||||
startretries=10
|
||||
startsecs=2
|
||||
stopsignal=TERM
|
||||
stopwaitsecs=20
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:corescope-server]
|
||||
command=/app/corescope-server -config-dir /app -db /app/data/meshcore.db -public /app/public -port 3000
|
||||
directory=/app
|
||||
autostart=true
|
||||
autorestart=true
|
||||
startretries=10
|
||||
startsecs=2
|
||||
stopsignal=TERM
|
||||
stopwaitsecs=20
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
@@ -1,34 +0,0 @@
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
user=root
|
||||
logfile=/dev/stdout
|
||||
logfile_maxbytes=0
|
||||
pidfile=/var/run/supervisord.pid
|
||||
|
||||
[program:corescope-ingestor]
|
||||
command=/app/corescope-ingestor -config /app/config.json
|
||||
directory=/app
|
||||
autostart=true
|
||||
autorestart=true
|
||||
startretries=10
|
||||
startsecs=2
|
||||
stopsignal=TERM
|
||||
stopwaitsecs=20
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:corescope-server]
|
||||
command=/app/corescope-server -config-dir /app -db /app/data/meshcore.db -public /app/public -port 3000
|
||||
directory=/app
|
||||
autostart=true
|
||||
autorestart=true
|
||||
startretries=10
|
||||
startsecs=2
|
||||
stopsignal=TERM
|
||||
stopwaitsecs=20
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
+2
-83
@@ -48,52 +48,9 @@ No `config.json` is required. The server starts with sensible defaults:
|
||||
- Ingestor connects to `mqtt://localhost:1883` automatically
|
||||
- SQLite database at `/app/data/meshcore.db`
|
||||
|
||||
### Full `docker run` Reference (recommended)
|
||||
### Docker Compose (recommended for production)
|
||||
|
||||
The bare `docker run` command is the primary deployment method. One image, documented parameters — run it however you want.
|
||||
|
||||
```bash
|
||||
docker run -d --name corescope \
|
||||
--restart=unless-stopped \
|
||||
-p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-e DISABLE_MOSQUITTO=false \
|
||||
-e DISABLE_CADDY=false \
|
||||
-v /your/data:/app/data \
|
||||
-v /your/Caddyfile:/etc/caddy/Caddyfile:ro \
|
||||
-v /your/caddy-data:/data/caddy \
|
||||
ghcr.io/kpa-clawbot/corescope:latest
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Parameter | Required | Description |
|
||||
|-----------|----------|-------------|
|
||||
| `-p 80:80` | Yes | HTTP web UI |
|
||||
| `-p 443:443` | No | HTTPS (only if using built-in Caddy with a domain) |
|
||||
| `-p 1883:1883` | No | MQTT broker (expose if external gateways connect directly) |
|
||||
| `-v /your/data:/app/data` | Yes | Persistent data: SQLite DB, config.json, theme.json |
|
||||
| `-v /your/Caddyfile:/etc/caddy/Caddyfile:ro` | No | Custom Caddyfile for HTTPS |
|
||||
| `-v /your/caddy-data:/data/caddy` | No | Caddy TLS certificate storage |
|
||||
| `-e DISABLE_MOSQUITTO=true` | No | Skip the internal Mosquitto broker (use your own) |
|
||||
| `-e DISABLE_CADDY=true` | No | Skip the built-in Caddy reverse proxy |
|
||||
| `-e MQTT_BROKER=mqtt://host:1883` | No | Override MQTT broker URL |
|
||||
|
||||
#### `/app/data/.env` convenience file
|
||||
|
||||
Instead of passing `-e` flags, you can drop a `.env` file in your data volume:
|
||||
|
||||
```bash
|
||||
# /your/data/.env
|
||||
DISABLE_MOSQUITTO=true
|
||||
DISABLE_CADDY=true
|
||||
MQTT_BROKER=mqtt://my-broker:1883
|
||||
```
|
||||
|
||||
The entrypoint sources this file before starting services. This works with any launch method (`docker run`, compose, or manage.sh).
|
||||
|
||||
### Docker Compose (legacy alternative)
|
||||
|
||||
Docker Compose files are maintained for backward compatibility but are no longer the recommended approach.
|
||||
Download the example compose file:
|
||||
|
||||
```bash
|
||||
curl -sL https://raw.githubusercontent.com/Kpa-clawbot/CoreScope/master/docker-compose.example.yml \
|
||||
@@ -108,11 +65,6 @@ docker compose up -d
|
||||
| `HTTP_PORT` | `80` | Host port for the web UI |
|
||||
| `DATA_DIR` | `./data` | Host path for persistent data |
|
||||
| `DISABLE_MOSQUITTO` | `false` | Set `true` to use an external MQTT broker |
|
||||
| `DISABLE_CADDY` | `false` | Set `true` to skip the built-in Caddy proxy |
|
||||
|
||||
### manage.sh (legacy alternative)
|
||||
|
||||
The `manage.sh` wrapper script provides a setup wizard and convenience commands. It uses Docker Compose internally. See [DEPLOY.md](../DEPLOY.md) for usage. New deployments should prefer bare `docker run`.
|
||||
|
||||
### Image tags
|
||||
|
||||
@@ -159,7 +111,6 @@ CoreScope uses a layered configuration system (highest priority wins):
|
||||
| `MQTT_TOPIC` | `meshcore/#` | MQTT topic subscription pattern |
|
||||
| `DB_PATH` | `data/meshcore.db` | SQLite database path |
|
||||
| `DISABLE_MOSQUITTO` | `false` | Skip the internal Mosquitto broker |
|
||||
| `DISABLE_CADDY` | `false` | Skip the built-in Caddy reverse proxy |
|
||||
|
||||
### config.json
|
||||
|
||||
@@ -310,38 +261,6 @@ Caddy handles certificate issuance and renewal automatically.
|
||||
|
||||
---
|
||||
|
||||
## API Documentation
|
||||
|
||||
CoreScope auto-generates an OpenAPI 3.0 specification from its route definitions. The spec is always in sync with the running server — no manual maintenance required.
|
||||
|
||||
### Endpoints
|
||||
|
||||
| URL | Description |
|
||||
|-----|-------------|
|
||||
| `/api/spec` | OpenAPI 3.0 JSON schema — machine-readable API definition |
|
||||
| `/api/docs` | Interactive Swagger UI — browse and test all 40+ endpoints |
|
||||
|
||||
### Usage
|
||||
|
||||
**Browse the API interactively:**
|
||||
```
|
||||
http://your-instance/api/docs
|
||||
```
|
||||
|
||||
**Fetch the spec programmatically:**
|
||||
```bash
|
||||
curl http://your-instance/api/spec | jq .
|
||||
```
|
||||
|
||||
**For bot/integration developers:** The spec includes all request parameters, response schemas, and example values. Import it into Postman, Insomnia, or any OpenAPI-compatible tool.
|
||||
|
||||
### Public instance
|
||||
The live instance at [analyzer.00id.net](https://analyzer.00id.net) has all API endpoints publicly accessible:
|
||||
- Spec: [analyzer.00id.net/api/spec](https://analyzer.00id.net/api/spec)
|
||||
- Docs: [analyzer.00id.net/api/docs](https://analyzer.00id.net/api/docs)
|
||||
|
||||
---
|
||||
|
||||
## Monitoring & Health Checks
|
||||
|
||||
### Docker health check
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
# CoreScope v3.5.0 🚀
|
||||
|
||||
The "stop building from source and start analyzing your mesh" release. 95 commits.
|
||||
|
||||
---
|
||||
|
||||
## 🐳 Pre-built Docker Images
|
||||
|
||||
CoreScope now ships as a ready-to-run Docker image on GitHub Container Registry. No cloning, no building, no dependencies — just pull and run.
|
||||
|
||||
```bash
|
||||
docker run -d --name corescope -p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-v corescope-data:/app/data \
|
||||
ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
```
|
||||
|
||||
**Using HTTPS with a custom domain?** Mount your Caddyfile and certs directory:
|
||||
```bash
|
||||
docker run -d --name corescope -p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
-v /your/Caddyfile:/etc/caddy/Caddyfile:ro \
|
||||
-v /your/caddy-data:/data/caddy \
|
||||
ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
```
|
||||
Caddy auto-provisions Let's Encrypt certs. Your Caddyfile just needs:
|
||||
```
|
||||
yourdomain.example.com {
|
||||
reverse_proxy localhost:3000
|
||||
}
|
||||
```
|
||||
|
||||
That's it. Zero config required — MQTT broker, Caddy HTTPS, and SQLite are built in.
|
||||
|
||||
**Already running CoreScope?**
|
||||
```bash
|
||||
# 1. Find your running container name
|
||||
docker ps --format '{{.Names}}'
|
||||
|
||||
# 2. Stop and remove it
|
||||
docker stop <container-name> && docker rm <container-name>
|
||||
|
||||
# 3. Pull the pre-built image
|
||||
docker pull ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
|
||||
# 4. Run with your existing data directory
|
||||
docker run -d --name corescope -p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
-v /your/Caddyfile:/etc/caddy/Caddyfile:ro \
|
||||
-v /your/caddy-data:/data/caddy \
|
||||
ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
```
|
||||
Your data volume stays. Nothing to migrate.
|
||||
|
||||
Tags: `v3.5.0` (this release) · `latest` (latest tagged release) · `edge` (master tip, for testing). Env: `DISABLE_CADDY=true` / `DISABLE_MOSQUITTO=true` if you bring your own.
|
||||
|
||||
---
|
||||
|
||||
## ⚡ 83% Faster
|
||||
|
||||
35 performance commits. Packets endpoint p50 dropped from 16.7ms → 2.7ms. Server now serves HTTP within 2 minutes on *any* DB size — async background backfill means you're never staring at a loading screen. N+1 API calls killed everywhere. Prefix map memory cut 10x. WebSocket renders batched via rAF.
|
||||
|
||||
---
|
||||
|
||||
## 🔬 RF Health Dashboard
|
||||
|
||||
New Analytics tab. Per-observer noise floor as color-coded columns (green/yellow/red), airtime utilization, error rates, battery levels. Click any observer for the full breakdown. Region-filterable. This is the beginning of making CoreScope more than just a packet viewer.
|
||||
|
||||
---
|
||||
|
||||
## 🗺️ See Where Traces Actually Go
|
||||
|
||||
Send a trace → watch it on the live map. Solid animated line shows how far it got. Dashed ghost shows where it didn't reach. Finally know *where* your trace failed, not just *that* it failed.
|
||||
|
||||
---
|
||||
|
||||
## 📊 Things That Were Lying To You
|
||||
|
||||
- "By Repeaters" was counting companions. Fixed.
|
||||
- Zero-hop adverts claimed "1 byte hash" when the hash size was unknowable. Fixed.
|
||||
- "Packets through this node" showed packets through a *different* node with the same prefix. Fixed — now uses the neighbor affinity graph.
|
||||
- Table sorting on nodes/neighbors/observers silently did nothing. Fixed.
|
||||
|
||||
---
|
||||
|
||||
## 🔗 Deep Links · 🎨 Channel Colors · 📱 Mobile · 🔑 Security
|
||||
|
||||
**Deep links** — every page state goes in the URL. Share a link to a specific node, filter, or analytics tab.
|
||||
|
||||
**Channel colors** — click the color dot next to any channel, pick from 8 colors, see it highlighted across the feed. Persists in localStorage.
|
||||
|
||||
**Distance units** — km, miles, or auto-detect from locale. Customizer → Display.
|
||||
|
||||
**Mobile** — 44px touch targets, ARIA labels, responsive breakpoints.
|
||||
|
||||
**Security** — weak API keys rejected at startup. License: GPL v3.
|
||||
|
||||
---
|
||||
|
||||
## 📡 Full API Documentation
|
||||
|
||||
Every endpoint is now documented with an auto-generated OpenAPI 3.0 spec — always in sync with the running server.
|
||||
|
||||
- **Interactive Swagger UI:** [analyzer.00id.net/api/docs](https://analyzer.00id.net/api/docs) — browse and test all 40+ endpoints
|
||||
- **Machine-readable spec:** [analyzer.00id.net/api/spec](https://analyzer.00id.net/api/spec) — import into Postman, Insomnia, or use for bot/integration development
|
||||
|
||||
On your own instance: `/api/docs` and `/api/spec`.
|
||||
|
||||
---
|
||||
|
||||
## 🐛 14 Bugs Squashed
|
||||
|
||||
Live map crash, zero-hop hash lies, animation freezes, repeater miscounts, prefix collisions, dead channel picker, invisible buttons, broken sorting, memory leak, and more.
|
||||
|
||||
---
|
||||
|
||||
## Upgrade
|
||||
|
||||
```bash
|
||||
docker stop <container-name> && docker rm <container-name>
|
||||
docker pull ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
|
||||
# HTTP only:
|
||||
docker run -d --name corescope -p 80:80 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
|
||||
# With HTTPS (custom domain):
|
||||
docker run -d --name corescope -p 80:80 -p 443:443 -p 1883:1883 \
|
||||
-v /your/data:/app/data \
|
||||
-v /your/Caddyfile:/etc/caddy/Caddyfile:ro \
|
||||
-v /your/caddy-data:/data/caddy \
|
||||
ghcr.io/kpa-clawbot/corescope:v3.5.0
|
||||
```
|
||||
|
||||
First start backfills `resolved_path` in the background. No downtime. No breaking changes.
|
||||
@@ -1,266 +0,0 @@
|
||||
# Timestamp-Based Packet Filters
|
||||
|
||||
**Issue:** #289
|
||||
**Status:** Draft
|
||||
**Depends on:** #286 (timestamp display config)
|
||||
|
||||
## Summary
|
||||
|
||||
Extend the existing filter engine (`packet-filter.js`) with a `time` field type supporting absolute ISO timestamps, relative durations, and range expressions. The filter compiles date expressions to epoch milliseconds at parse time so per-packet evaluation is a single numeric comparison — no date parsing in the hot path.
|
||||
|
||||
## Syntax
|
||||
|
||||
### Absolute (ISO 8601)
|
||||
|
||||
```
|
||||
time > "2024-01-01T00:00:00Z"
|
||||
time <= "2024-06-15"
|
||||
time == "2024-03-01"
|
||||
```
|
||||
|
||||
Quoted strings after `time` are parsed as dates. Partial dates (`"2024-01-01"`) are treated as midnight UTC. All absolute values are interpreted as UTC regardless of the user's display preference.
|
||||
|
||||
### Relative
|
||||
|
||||
```
|
||||
time > 2h ago
|
||||
time > 30m ago
|
||||
time > 7d ago
|
||||
```
|
||||
|
||||
The lexer recognizes `<number><unit> ago` as a relative time literal. Supported units: `s` (seconds), `m` (minutes), `h` (hours), `d` (days). At compile time, the relative offset is resolved to an absolute epoch ms value (`Date.now() - offset`). This means a compiled filter's relative thresholds are frozen at compile time — recompile to refresh.
|
||||
|
||||
### Shorthand
|
||||
|
||||
```
|
||||
time.ago < 30m
|
||||
time.ago < 2h
|
||||
```
|
||||
|
||||
`time.ago` resolves to `Date.now() - packet.timestamp`. The comparison value is a duration literal (`30m`, `2h`, `7d`). This is syntactic sugar and semantically equivalent to the relative form but reads more naturally for "show me recent packets."
|
||||
|
||||
### Range
|
||||
|
||||
```
|
||||
time between "2024-01-01" "2024-01-02"
|
||||
time between 1h ago 30m ago
|
||||
```
|
||||
|
||||
`between` is a ternary operator: `field between <low> <high>`. Compiles to `low <= field && field <= high`. Both bounds are inclusive.
|
||||
|
||||
### Combinable with existing filters
|
||||
|
||||
```
|
||||
type == Advert && time > 1h ago
|
||||
snr > 5 && time between "2024-01-01" "2024-01-02"
|
||||
(type == GRP_TXT || type == TXT_MSG) && time.ago < 30m
|
||||
```
|
||||
|
||||
## Grammar Extension
|
||||
|
||||
### New token types
|
||||
|
||||
| Token | Pattern | Example |
|
||||
|-------|---------|---------|
|
||||
| `DURATION` | `/^\d+[smhd]$/` | `30m`, `2h`, `7d` |
|
||||
| `AGO` | keyword `ago` | `ago` |
|
||||
| `BETWEEN` | keyword `between` | `between` |
|
||||
|
||||
### Lexer changes
|
||||
|
||||
1. After reading an identifier that matches `\d+[smhd]`, emit `DURATION` token instead of `FIELD`.
|
||||
2. Recognize `ago` and `between` as keywords (like `and`/`or`).
|
||||
|
||||
### Parser changes
|
||||
|
||||
In `parseComparison()`:
|
||||
|
||||
1. **Relative time:** If field is `time` and value tokens are `DURATION AGO`, compute `Date.now() - durationToMs(duration)` and store as a numeric epoch ms value in the AST node.
|
||||
2. **Absolute time:** If field is `time` and value is a `STRING`, attempt `new Date(value).getTime()`. If `NaN`, return parse error. Store epoch ms.
|
||||
3. **`time.ago` shorthand:** If field is `time.ago`, the value is a `DURATION`. Store the duration in ms. At evaluation, compute `now - packet_ts` and compare against the duration.
|
||||
4. **`between`:** If operator token is `BETWEEN`, consume two values (same type resolution as above). Emit `{ type: 'between', field, low, high }`.
|
||||
|
||||
### AST node shapes
|
||||
|
||||
```js
|
||||
// Absolute/relative (pre-resolved to epoch ms)
|
||||
{ type: 'comparison', field: 'time', op: '>', value: 1704067200000 }
|
||||
|
||||
// time.ago (duration in ms)
|
||||
{ type: 'comparison', field: 'time.ago', op: '<', value: 1800000 }
|
||||
|
||||
// between (both bounds as epoch ms)
|
||||
{ type: 'between', field: 'time', low: 1704067200000, high: 1704153600000 }
|
||||
```
|
||||
|
||||
## Field Resolution
|
||||
|
||||
Add to `resolveField()`:
|
||||
|
||||
```js
|
||||
if (field === 'time') return packet.timestamp; // epoch ms
|
||||
if (field === 'time.ago') return Date.now() - packet.timestamp;
|
||||
```
|
||||
|
||||
`packet.timestamp` is the packet's capture time in epoch milliseconds. This field already exists in the data model (populated from the DB `created_at` column).
|
||||
|
||||
## Time Semantics
|
||||
|
||||
- **Filter expressions:** Always UTC. `"2024-01-01"` means `2024-01-01T00:00:00Z`.
|
||||
- **Display:** Follows the user's timestamp config from #286 (UTC/local/relative).
|
||||
- **Relative times:** Computed against `Date.now()` at compile time. The compiled filter is a snapshot — if the filter stays active for hours, relative thresholds drift. This is acceptable; filters are typically short-lived or recompiled on interaction.
|
||||
|
||||
**No timezone specifiers in the filter syntax.** UTC only. This avoids ambiguity and parsing complexity. Users who think in local time can use the relative syntax (`time > 2h ago`) which is timezone-agnostic.
|
||||
|
||||
## Performance
|
||||
|
||||
### Compile-time work (once)
|
||||
|
||||
- Parse date strings → epoch ms via `new Date().getTime()` (~1μs per date)
|
||||
- Parse duration strings → ms via multiplication (~0ns, trivial arithmetic)
|
||||
- Relative `ago` → `Date.now() - offset` (~0ns)
|
||||
|
||||
### Per-packet evaluation (hot path)
|
||||
|
||||
- `time` comparison: one numeric read + one numeric compare. Same cost as `snr > 5`.
|
||||
- `time.ago`: one subtraction + one compare. Two arithmetic ops. **Important:** cache `Date.now()` once per filter pass (e.g., in a closure variable set before iterating packets), not per-packet. 30K `Date.now()` calls are ~1ms but it's a pointless syscall tax.
|
||||
- `between`: two numeric compares.
|
||||
|
||||
**No `Date` objects created per packet. No string parsing per packet. No regex per packet.**
|
||||
|
||||
At 30K packets, the time filter adds ~0.1ms total to filter evaluation — dominated by the existing field resolution and AST walk overhead. No measurable regression.
|
||||
|
||||
### Implementation note: `between` as sugar
|
||||
|
||||
`between` should compile to `{ type: 'and', left: { type: 'comparison', field, op: '>=', value: low }, right: { type: 'comparison', field, op: '<=', value: high } }` — reusing existing comparison evaluation. No new AST node type, no new evaluator branch. The parser desugars it; the evaluator never sees `between`.
|
||||
|
||||
### Implementation note: `time.ago` and `Date.now()` caching
|
||||
|
||||
The `compile()` function should return a filter that accepts an optional `now` parameter:
|
||||
|
||||
```js
|
||||
var compiled = compile('time.ago < 30m');
|
||||
var now = Date.now();
|
||||
packets.filter(function(p) { return compiled.filter(p, now); });
|
||||
```
|
||||
|
||||
If `now` is not passed, `Date.now()` is called once on the first invocation and reused for the entire filter pass. This avoids 30K syscalls and ensures consistent evaluation within a single pass.
|
||||
|
||||
## Carmack Review Notes
|
||||
|
||||
Reviewed with a performance-first lens (30K+ packets, real-time updates):
|
||||
|
||||
1. **✅ No allocations in hot path.** All date parsing happens at compile time. Per-packet evaluation is pure numeric comparison — same cost as existing `snr > 5` filters.
|
||||
|
||||
2. **⚠️ `Date.now()` per-packet for `time.ago`.** Fixed above — cache once per filter pass via optional `now` parameter or closure. Without this, 30K packets × `Date.now()` = ~1ms wasted on a monotonic clock syscall that returns the same value.
|
||||
|
||||
3. **✅ `between` as sugar, not a new node type.** Desugar in the parser to reuse existing `and` + `comparison` nodes. Zero new code paths in the evaluator = zero new bugs in the evaluator.
|
||||
|
||||
4. **✅ Parser complexity is bounded.** Three new token types, two new keywords (`ago`, `between`). The parser remains LL(1) — no backtracking, no ambiguity. `DURATION AGO` is a clear two-token lookahead only when field is `time`.
|
||||
|
||||
5. **✅ Memory impact negligible.** Compiled time filters add one or two floats to the AST. At 16 bytes per node, even complex expressions with multiple time clauses are <100 bytes.
|
||||
|
||||
6. **⚠️ Compiled filter staleness for relative times.** Spec acknowledges this. Acceptable for a web UI where filters are recompiled on user interaction. If filters persist across long WebSocket sessions, consider recompiling on a timer (every 60s). This is a future concern, not a blocker.
|
||||
|
||||
7. **✅ No regex in hot path.** Duration parsing uses a simple char check on the last character + `parseInt`. Cheaper than any regex.
|
||||
|
||||
A compiled time filter adds one or two 64-bit float values to the AST. Negligible — roughly 16 bytes per time comparison node.
|
||||
|
||||
## URL Integration
|
||||
|
||||
Time filters appear in the URL hash query string like any other filter:
|
||||
|
||||
```
|
||||
#/packets?filter=time%20%3E%201h%20ago
|
||||
#/packets?filter=type%20%3D%3D%20Advert%20%26%26%20time%20%3E%20%222024-01-01%22
|
||||
```
|
||||
|
||||
The filter text is URL-encoded and round-trips through `encodeURIComponent`/`decodeURIComponent`. No special handling needed — the existing filter-in-URL mechanism (#286 or current) works unchanged.
|
||||
|
||||
For convenience, a future milestone could add dedicated `timeFrom`/`timeTo` query params that inject into the filter, but this is not required for the initial implementation.
|
||||
|
||||
## Wireshark Compatibility
|
||||
|
||||
| Wireshark syntax | CoreScope equivalent | Notes |
|
||||
|------------------|---------------------|-------|
|
||||
| `frame.time >= "2024-01-01"` | `time >= "2024-01-01"` | We use `time` instead of `frame.time` for brevity. Could alias `frame.time` → `time` later. |
|
||||
| `frame.time_relative < 60` | `time.ago < 60s` | Wireshark uses seconds float; we use duration literals |
|
||||
| `frame.time_delta` | Not supported | Inter-packet delta is a different feature |
|
||||
|
||||
We intentionally diverge from Wireshark where their syntax is verbose or requires pcap-specific concepts. CoreScope's filter language prioritizes brevity and readability for a web UI. A `frame.time` alias for `time` can be added trivially in the field resolver if users request it.
|
||||
|
||||
## Milestones
|
||||
|
||||
### M1: Core time filtering (parser + evaluator)
|
||||
- Add `DURATION`, `AGO`, `BETWEEN` tokens to lexer
|
||||
- Extend parser for `time` field special handling
|
||||
- Add `time` and `time.ago` to `resolveField()`
|
||||
- Implement `between` AST node evaluation
|
||||
- Unit tests: absolute, relative, ago, between, combined with existing filters, edge cases (bad dates, invalid units)
|
||||
- **Test:** filter 30K packets by time in <50ms (assert in test)
|
||||
|
||||
### M2: UI integration
|
||||
- Filter bar autocomplete hints for time syntax
|
||||
- Help tooltip / cheat sheet update with time examples
|
||||
- Verify URL round-trip with time filters
|
||||
- Playwright E2E test: enter time filter, verify packet list updates
|
||||
|
||||
### M3: Polish
|
||||
- `frame.time` alias
|
||||
- Error messages for common mistakes ("did you mean `time > 1h ago`?")
|
||||
- Consider dedicated time range picker UI widget (out of scope for this spec)
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit tests (add to `test-packet-filter.js`)
|
||||
|
||||
```js
|
||||
// Absolute time
|
||||
c = compile('time > "2024-01-01"');
|
||||
assert(c.filter({ timestamp: new Date('2024-06-01').getTime() }), 'after 2024-01-01');
|
||||
assert(!c.filter({ timestamp: new Date('2023-06-01').getTime() }), 'before 2024-01-01');
|
||||
|
||||
// Relative time
|
||||
c = compile('time > 1h ago');
|
||||
assert(c.filter({ timestamp: Date.now() - 30 * 60000 }), '30m ago passes 1h filter');
|
||||
assert(!c.filter({ timestamp: Date.now() - 2 * 3600000 }), '2h ago fails 1h filter');
|
||||
|
||||
// time.ago shorthand
|
||||
c = compile('time.ago < 30m');
|
||||
assert(c.filter({ timestamp: Date.now() - 10 * 60000 }), '10m ago < 30m');
|
||||
assert(!c.filter({ timestamp: Date.now() - 60 * 60000 }), '60m ago not < 30m');
|
||||
|
||||
// between
|
||||
c = compile('time between "2024-01-01" "2024-01-02"');
|
||||
assert(c.filter({ timestamp: new Date('2024-01-01T12:00:00Z').getTime() }), 'in range');
|
||||
assert(!c.filter({ timestamp: new Date('2024-01-03').getTime() }), 'out of range');
|
||||
|
||||
// Combined
|
||||
c = compile('type == Advert && time > 1h ago');
|
||||
assert(c.filter({ payload_type: 4, timestamp: Date.now() - 1000 }), 'combined pass');
|
||||
assert(!c.filter({ payload_type: 4, timestamp: Date.now() - 7200000 }), 'combined fail time');
|
||||
assert(!c.filter({ payload_type: 1, timestamp: Date.now() - 1000 }), 'combined fail type');
|
||||
|
||||
// Error cases
|
||||
c = compile('time > "not-a-date"');
|
||||
assert(c.error, 'invalid date string');
|
||||
|
||||
c = compile('time > 5x ago');
|
||||
assert(c.error, 'invalid duration unit');
|
||||
|
||||
// Performance
|
||||
var start = Date.now();
|
||||
c = compile('time > 1h ago && type == Advert');
|
||||
var packets = [];
|
||||
for (var i = 0; i < 30000; i++) {
|
||||
packets.push({ payload_type: i % 5, timestamp: Date.now() - i * 1000 });
|
||||
}
|
||||
packets.forEach(function(p) { c.filter(p); });
|
||||
assert(Date.now() - start < 50, 'filter 30K packets in <50ms');
|
||||
```
|
||||
|
||||
### Playwright tests
|
||||
|
||||
- Enter `time > 1h ago` in filter bar → verify packet count decreases
|
||||
- Enter invalid time filter → verify error message appears
|
||||
- Reload page with time filter in URL → verify filter is applied
|
||||
@@ -1,674 +0,0 @@
|
||||
# Deep Linking P1 Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Make P1 UI states in nodes, packets, and channels URL-addressable so they survive refresh and can be shared.
|
||||
|
||||
**Architecture:** Each page reads URL params from `location.hash.split('?')[1]` on init (router strips query string before passing `routeParam`, so pages must read `location.hash` directly). State changes call `history.replaceState` to keep the URL in sync. localStorage remains the fallback default; URL params override when present.
|
||||
|
||||
**Tech Stack:** Vanilla JS (ES5/6), browser History API, URLSearchParams
|
||||
|
||||
---
|
||||
|
||||
## Files Changed
|
||||
|
||||
| File | Changes |
|
||||
|---|---|
|
||||
| `public/region-filter.js` | Add `setSelected(codesArray)`, track `_container` for re-render |
|
||||
| `public/nodes.js` | Read `?tab=`/`?search=` on init; `updateNodesUrl()` on tab/search change; expose `buildNodesQuery` on `window` |
|
||||
| `public/packets.js` | Read `?timeWindow=`/`?region=` on init; `updatePacketsUrl()` on timeWindow/region change; expose `buildPacketsUrl` on `window` |
|
||||
| `public/channels.js` | Read `?node=` on init; update URL in `showNodeDetail`/`closeNodeDetail` |
|
||||
| `test-frontend-helpers.js` | Add unit tests for `buildNodesQuery` and `buildPacketsUrl` |
|
||||
| `test-e2e-playwright.js` | Add Playwright tests: tab URL persistence, timeWindow URL persistence |
|
||||
|
||||
---
|
||||
|
||||
## Task 1: Add `setSelected` to RegionFilter
|
||||
|
||||
**Files:**
|
||||
- Modify: `public/region-filter.js`
|
||||
|
||||
- [ ] **Step 1: Write the failing unit test**
|
||||
|
||||
Add to `test-frontend-helpers.js` before the `// ===== SUMMARY =====` line:
|
||||
|
||||
```javascript
|
||||
// ===== REGION-FILTER.JS: setSelected =====
|
||||
console.log('\n=== region-filter.js: setSelected ===');
|
||||
{
|
||||
const ctx = makeSandbox();
|
||||
  ctx.fetch = () => Promise.resolve({ json: () => Promise.resolve({ 'US-SFO': 'San Francisco', 'US-LAX': 'Los Angeles' }) });
|
||||
loadInCtx(ctx, 'public/region-filter.js');
|
||||
|
||||
const RF = ctx.RegionFilter;
|
||||
RF.init(document.createElement('div'));
|
||||
|
||||
test('setSelected sets region codes', async () => {
|
||||
await RF.init(document.createElement('div'));
|
||||
RF.setSelected(['US-SFO', 'US-LAX']);
|
||||
assert.strictEqual(RF.getRegionParam(), 'US-SFO,US-LAX');
|
||||
});
|
||||
|
||||
test('setSelected with null clears selection', async () => {
|
||||
await RF.init(document.createElement('div'));
|
||||
RF.setSelected(['US-SFO']);
|
||||
RF.setSelected(null);
|
||||
assert.strictEqual(RF.getRegionParam(), '');
|
||||
});
|
||||
|
||||
test('setSelected with empty array clears selection', async () => {
|
||||
await RF.init(document.createElement('div'));
|
||||
RF.setSelected(['US-SFO']);
|
||||
RF.setSelected([]);
|
||||
assert.strictEqual(RF.getRegionParam(), '');
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run test to verify it fails**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -A2 "setSelected"
|
||||
```
|
||||
|
||||
Expected: `❌ setSelected sets region codes: RF.setSelected is not a function`
|
||||
|
||||
- [ ] **Step 3: Add `_container` tracking and `setSelected` to region-filter.js**
|
||||
|
||||
In `region-filter.js`, add `var _container = null;` after the existing module-level vars (after line 9 `var _listeners = [];`):
|
||||
|
||||
```javascript
|
||||
var _listeners = [];
|
||||
var _container = null; // ← add this line
|
||||
var _loaded = false;
|
||||
```
|
||||
|
||||
In `initFilter`, save the container:
|
||||
|
||||
```javascript
|
||||
async function initFilter(container, opts) {
|
||||
_container = container; // ← add this line
|
||||
if (opts && opts.dropdown) container._forceDropdown = true;
|
||||
await fetchRegions();
|
||||
render(container);
|
||||
}
|
||||
```
|
||||
|
||||
Add `setSelected` function before `// Expose globally`:
|
||||
|
||||
```javascript
|
||||
/** Override selected regions (e.g. from URL param). Persists to localStorage and re-renders. */
|
||||
function setSelected(codesArray) {
|
||||
_selected = (codesArray && codesArray.length > 0) ? new Set(codesArray) : null;
|
||||
saveToStorage();
|
||||
if (_container) render(_container);
|
||||
}
|
||||
```
|
||||
|
||||
Add `setSelected` to the public API object:
|
||||
|
||||
```javascript
|
||||
window.RegionFilter = {
|
||||
init: initFilter,
|
||||
render: render,
|
||||
getSelected: getSelected,
|
||||
getRegionParam: getRegionParam,
|
||||
regionQueryString: regionQueryString,
|
||||
onChange: onChange,
|
||||
offChange: offChange,
|
||||
fetchRegions: fetchRegions,
|
||||
setSelected: setSelected, // ← add this line
|
||||
};
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run test to verify it passes**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -E "(setSelected|FAIL|passed|failed)"
|
||||
```
|
||||
|
||||
Expected: 3 passing `setSelected` tests, overall pass.
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add public/region-filter.js test-frontend-helpers.js
|
||||
git commit -m "feat: add RegionFilter.setSelected for URL param initialization (#536)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 2: nodes.js — tab and search deep linking
|
||||
|
||||
**Files:**
|
||||
- Modify: `public/nodes.js`
|
||||
- Test: `test-frontend-helpers.js`
|
||||
- Test: `test-e2e-playwright.js`
|
||||
|
||||
- [ ] **Step 1: Write the unit test (add to test-frontend-helpers.js)**
|
||||
|
||||
Add before the `// ===== SUMMARY =====` line:
|
||||
|
||||
```javascript
|
||||
// ===== NODES.JS: buildNodesQuery =====
|
||||
console.log('\n=== nodes.js: buildNodesQuery ===');
|
||||
{
|
||||
const ctx = makeSandbox();
|
||||
loadInCtx(ctx, 'public/roles.js');
|
||||
loadInCtx(ctx, 'public/app.js');
|
||||
|
||||
// Provide required globals for nodes.js IIFE to execute
|
||||
ctx.registerPage = () => {};
|
||||
ctx.RegionFilter = { init: () => Promise.resolve(), onChange: () => () => {}, offChange: () => {}, getSelected: () => null, getRegionParam: () => '' };
|
||||
ctx.onWS = () => {};
|
||||
ctx.offWS = () => {};
|
||||
ctx.debouncedOnWS = () => () => {};
|
||||
ctx.invalidateApiCache = () => {};
|
||||
ctx.favStar = () => '';
|
||||
ctx.bindFavStars = () => {};
|
||||
ctx.getFavorites = () => [];
|
||||
ctx.isFavorite = () => false;
|
||||
ctx.connectWS = () => {};
|
||||
ctx.HopResolver = { init: () => {}, resolve: () => ({}), ready: () => false };
|
||||
ctx.initTabBar = () => {};
|
||||
ctx.debounce = (fn) => fn;
|
||||
ctx.copyToClipboard = () => {};
|
||||
ctx.api = () => Promise.resolve({});
|
||||
ctx.escapeHtml = (s) => s;
|
||||
ctx.timeAgo = () => '';
|
||||
ctx.formatTimestampWithTooltip = () => '';
|
||||
ctx.getTimestampMode = () => 'ago';
|
||||
ctx.CLIENT_TTL = {};
|
||||
ctx.qrcode = null;
|
||||
|
||||
try {
|
||||
const src = fs.readFileSync('public/nodes.js', 'utf8');
|
||||
vm.runInContext(src, ctx);
|
||||
for (const k of Object.keys(ctx.window)) ctx[k] = ctx.window[k];
|
||||
} catch (e) {
|
||||
console.log(' ⚠️ nodes.js sandbox load failed:', e.message.slice(0, 120));
|
||||
}
|
||||
|
||||
const buildNodesQuery = ctx.buildNodesQuery;
|
||||
|
||||
if (buildNodesQuery) {
|
||||
test('buildNodesQuery: all tab + no search = empty', () => {
|
||||
assert.strictEqual(buildNodesQuery('all', ''), '');
|
||||
});
|
||||
test('buildNodesQuery: repeater tab only', () => {
|
||||
assert.strictEqual(buildNodesQuery('repeater', ''), '?tab=repeater');
|
||||
});
|
||||
test('buildNodesQuery: search only (all tab)', () => {
|
||||
assert.strictEqual(buildNodesQuery('all', 'foo'), '?search=foo');
|
||||
});
|
||||
test('buildNodesQuery: tab + search combined', () => {
|
||||
assert.strictEqual(buildNodesQuery('companion', 'bar'), '?tab=companion&search=bar');
|
||||
});
|
||||
test('buildNodesQuery: null search treated as empty', () => {
|
||||
assert.strictEqual(buildNodesQuery('all', null), '');
|
||||
});
|
||||
test('buildNodesQuery: sensor tab', () => {
|
||||
assert.strictEqual(buildNodesQuery('sensor', ''), '?tab=sensor');
|
||||
});
|
||||
} else {
|
||||
console.log(' ⚠️ buildNodesQuery not exposed — skipping');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run test to verify it fails (or skips)**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -A3 "buildNodesQuery"
|
||||
```
|
||||
|
||||
Expected: `⚠️ buildNodesQuery not exposed — skipping`
|
||||
|
||||
- [ ] **Step 3: Add URL param reading and helpers to nodes.js**
|
||||
|
||||
**3a.** Add `buildNodesQuery` and `updateNodesUrl` functions inside the nodes.js IIFE, after the `TABS` definition (around line 86, before `function renderNodeTimestampHtml`):
|
||||
|
||||
```javascript
|
||||
function buildNodesQuery(tab, searchStr) {
|
||||
var parts = [];
|
||||
if (tab && tab !== 'all') parts.push('tab=' + encodeURIComponent(tab));
|
||||
if (searchStr) parts.push('search=' + encodeURIComponent(searchStr));
|
||||
return parts.length ? '?' + parts.join('&') : '';
|
||||
}
|
||||
window.buildNodesQuery = buildNodesQuery;
|
||||
|
||||
function updateNodesUrl() {
|
||||
history.replaceState(null, '', '#/nodes' + buildNodesQuery(activeTab, search));
|
||||
}
|
||||
```
|
||||
|
||||
**3b.** In the list-view branch of `init` (after the `return;` that ends the full-screen block at line 317), add URL param reading before `app.innerHTML`:
|
||||
|
||||
```javascript
|
||||
// Read URL params for list view (router strips query string from routeParam)
|
||||
const _listUrlParams = new URLSearchParams(location.hash.split('?')[1] || '');
|
||||
const _urlTab = _listUrlParams.get('tab');
|
||||
const _urlSearch = _listUrlParams.get('search');
|
||||
if (_urlTab && TABS.some(function(t) { return t.key === _urlTab; })) activeTab = _urlTab;
|
||||
if (_urlSearch) search = _urlSearch;
|
||||
|
||||
app.innerHTML = `<div class="nodes-page">
|
||||
```
|
||||
|
||||
**3c.** After `app.innerHTML = ...` (after the closing backtick at line ~330), populate the search input:
|
||||
|
||||
```javascript
|
||||
if (search) {
|
||||
var _si = document.getElementById('nodeSearch');
|
||||
if (_si) _si.value = search;
|
||||
}
|
||||
```
|
||||
|
||||
**3d.** In the search input event listener (around line 335), add `updateNodesUrl()`:
|
||||
|
||||
```javascript
|
||||
document.getElementById('nodeSearch').addEventListener('input', debounce(e => {
|
||||
search = e.target.value;
|
||||
updateNodesUrl();
|
||||
loadNodes();
|
||||
}, 250));
|
||||
```
|
||||
|
||||
**3e.** In the tab click handler inside `renderLeft` (around line 875), add `updateNodesUrl()`:
|
||||
|
||||
```javascript
|
||||
btn.addEventListener('click', () => { activeTab = btn.dataset.tab; updateNodesUrl(); loadNodes(); });
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run unit tests**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -E "(buildNodesQuery|✅|❌)" | grep -v "helpers"
|
||||
```
|
||||
|
||||
Expected: 6 passing `buildNodesQuery` tests.
|
||||
|
||||
- [ ] **Step 5: Write Playwright test (add to test-e2e-playwright.js)**
|
||||
|
||||
Add before the closing `await browser.close()` line:
|
||||
|
||||
```javascript
|
||||
// --- Group: Deep linking (#536) ---
|
||||
|
||||
// Test: nodes tab deep link
|
||||
await test('Nodes tab deep link restores active tab', async () => {
|
||||
await page.goto(BASE + '#/nodes?tab=repeater', { waitUntil: 'domcontentloaded' });
|
||||
await page.waitForSelector('.node-tab', { timeout: 8000 });
|
||||
const activeTab = await page.$('.node-tab.active');
|
||||
assert(activeTab, 'No active tab found');
|
||||
const tabText = await activeTab.textContent();
|
||||
assert(tabText.includes('Repeater'), `Expected Repeater tab active, got: ${tabText}`);
|
||||
const url = page.url();
|
||||
assert(url.includes('tab=repeater'), `URL should contain tab=repeater, got: ${url}`);
|
||||
});
|
||||
|
||||
// Test: nodes tab click updates URL
|
||||
await test('Nodes tab click updates URL', async () => {
|
||||
await page.goto(BASE + '#/nodes', { waitUntil: 'domcontentloaded' });
|
||||
await page.waitForSelector('.node-tab', { timeout: 8000 });
|
||||
const roomTab = await page.$('.node-tab[data-tab="room"]');
|
||||
if (roomTab) {
|
||||
await roomTab.click();
|
||||
await page.waitForTimeout(300);
|
||||
const url = page.url();
|
||||
assert(url.includes('tab=room'), `URL should contain tab=room after click, got: ${url}`);
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
- [ ] **Step 6: Run full test suite**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js
|
||||
```
|
||||
|
||||
Expected: all tests pass.
|
||||
|
||||
- [ ] **Step 7: Commit**
|
||||
|
||||
```bash
|
||||
git add public/nodes.js test-frontend-helpers.js test-e2e-playwright.js
|
||||
git commit -m "feat: deep link nodes tab and search query (#536)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 3: packets.js — timeWindow and region deep linking
|
||||
|
||||
**Files:**
|
||||
- Modify: `public/packets.js`
|
||||
- Test: `test-frontend-helpers.js`
|
||||
- Test: `test-e2e-playwright.js`
|
||||
|
||||
> Depends on Task 1 (RegionFilter.setSelected).
|
||||
|
||||
- [ ] **Step 1: Write the unit test**
|
||||
|
||||
Add to `test-frontend-helpers.js` before `// ===== SUMMARY =====`:
|
||||
|
||||
```javascript
|
||||
// ===== PACKETS.JS: buildPacketsUrl =====
|
||||
console.log('\n=== packets.js: buildPacketsUrl ===');
|
||||
{
|
||||
// Test the pure helper function
|
||||
// (loaded via packets.js after it exposes window.buildPacketsUrl)
|
||||
const ctx = makeSandbox();
|
||||
loadInCtx(ctx, 'public/roles.js');
|
||||
loadInCtx(ctx, 'public/app.js');
|
||||
|
||||
ctx.registerPage = () => {};
|
||||
ctx.RegionFilter = { init: () => Promise.resolve(), onChange: () => () => {}, offChange: () => {}, getSelected: () => null, getRegionParam: () => '', setSelected: () => {} };
|
||||
ctx.onWS = () => {};
|
||||
ctx.offWS = () => {};
|
||||
ctx.debouncedOnWS = () => () => {};
|
||||
ctx.invalidateApiCache = () => {};
|
||||
ctx.api = () => Promise.resolve({});
|
||||
ctx.observerMap = new Map();
|
||||
ctx.getParsedPath = () => [];
|
||||
ctx.getParsedDecoded = () => ({});
|
||||
ctx.clearParsedCache = () => {};
|
||||
ctx.escapeHtml = (s) => s;
|
||||
ctx.timeAgo = () => '';
|
||||
ctx.formatTimestampWithTooltip = () => '';
|
||||
ctx.getTimestampMode = () => 'ago';
|
||||
ctx.copyToClipboard = () => {};
|
||||
ctx.CLIENT_TTL = {};
|
||||
ctx.debounce = (fn) => fn;
|
||||
ctx.initTabBar = () => {};
|
||||
|
||||
try {
|
||||
const src = fs.readFileSync('public/packet-helpers.js', 'utf8');
|
||||
vm.runInContext(src, ctx);
|
||||
for (const k of Object.keys(ctx.window)) ctx[k] = ctx.window[k];
|
||||
const src2 = fs.readFileSync('public/packets.js', 'utf8');
|
||||
vm.runInContext(src2, ctx);
|
||||
for (const k of Object.keys(ctx.window)) ctx[k] = ctx.window[k];
|
||||
} catch (e) {
|
||||
console.log(' ⚠️ packets.js sandbox load failed:', e.message.slice(0, 120));
|
||||
}
|
||||
|
||||
const buildPacketsUrl = ctx.buildPacketsUrl;
|
||||
|
||||
if (buildPacketsUrl) {
|
||||
test('buildPacketsUrl: default (15min, no region) = bare #/packets', () => {
|
||||
assert.strictEqual(buildPacketsUrl(15, ''), '#/packets');
|
||||
});
|
||||
test('buildPacketsUrl: non-default timeWindow', () => {
|
||||
assert.strictEqual(buildPacketsUrl(60, ''), '#/packets?timeWindow=60');
|
||||
});
|
||||
test('buildPacketsUrl: region only', () => {
|
||||
assert.strictEqual(buildPacketsUrl(15, 'US-SFO'), '#/packets?region=US-SFO');
|
||||
});
|
||||
test('buildPacketsUrl: timeWindow + region', () => {
|
||||
assert.strictEqual(buildPacketsUrl(30, 'US-SFO,US-LAX'), '#/packets?timeWindow=30&region=US-SFO%2CUS-LAX');
|
||||
});
|
||||
test('buildPacketsUrl: timeWindow=0 treated as default', () => {
|
||||
assert.strictEqual(buildPacketsUrl(0, ''), '#/packets');
|
||||
});
|
||||
} else {
|
||||
console.log(' ⚠️ buildPacketsUrl not exposed — skipping');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run to verify it skips**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -A2 "buildPacketsUrl"
|
||||
```
|
||||
|
||||
Expected: `⚠️ buildPacketsUrl not exposed — skipping`
|
||||
|
||||
- [ ] **Step 3: Add helpers and URL param reading to packets.js**
|
||||
|
||||
**3a.** Add `buildPacketsUrl` and `updatePacketsUrl` inside the packets.js IIFE, after the existing constants at the top (around line 36, after `let showHexHashes`):
|
||||
|
||||
```javascript
|
||||
function buildPacketsUrl(timeWindowMin, regionParam) {
|
||||
var parts = [];
|
||||
if (timeWindowMin && timeWindowMin !== 15) parts.push('timeWindow=' + timeWindowMin);
|
||||
if (regionParam) parts.push('region=' + encodeURIComponent(regionParam));
|
||||
return '#/packets' + (parts.length ? '?' + parts.join('&') : '');
|
||||
}
|
||||
window.buildPacketsUrl = buildPacketsUrl;
|
||||
|
||||
function updatePacketsUrl() {
|
||||
history.replaceState(null, '', buildPacketsUrl(savedTimeWindowMin, RegionFilter.getRegionParam()));
|
||||
}
|
||||
```
|
||||
|
||||
**3b.** In the `init` function (around line 263), add URL param reading after the existing `routeParam`/`directObsId` parsing and before `app.innerHTML`:
|
||||
|
||||
```javascript
|
||||
// Read URL params for filter state (router strips query from routeParam; read from location.hash)
|
||||
var _initUrlParams = new URLSearchParams(location.hash.split('?')[1] || '');
|
||||
var _urlTimeWindow = Number(_initUrlParams.get('timeWindow'));
|
||||
if (Number.isFinite(_urlTimeWindow) && _urlTimeWindow > 0) {
|
||||
savedTimeWindowMin = _urlTimeWindow;
|
||||
localStorage.setItem('meshcore-time-window', String(_urlTimeWindow));
|
||||
}
|
||||
var _urlRegion = _initUrlParams.get('region');
|
||||
if (_urlRegion) {
|
||||
RegionFilter.setSelected(_urlRegion.split(',').filter(Boolean));
|
||||
}
|
||||
|
||||
app.innerHTML = `<div class="split-layout detail-collapsed">
|
||||
```
|
||||
|
||||
**3c.** In the time window change handler (around line 865), add `updatePacketsUrl()`:
|
||||
|
||||
```javascript
|
||||
fTimeWindow.addEventListener('change', () => {
|
||||
savedTimeWindowMin = Number(fTimeWindow.value);
|
||||
if (!Number.isFinite(savedTimeWindowMin) || savedTimeWindowMin <= 0) savedTimeWindowMin = 15;
|
||||
localStorage.setItem('meshcore-time-window', fTimeWindow.value);
|
||||
updatePacketsUrl();
|
||||
loadPackets();
|
||||
});
|
||||
```
|
||||
|
||||
**3d.** In the RegionFilter.onChange callback (around line 719), add `updatePacketsUrl()`:
|
||||
|
||||
```javascript
|
||||
RegionFilter.onChange(function() { updatePacketsUrl(); loadPackets(); });
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run unit tests**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js 2>&1 | grep -E "(buildPacketsUrl|✅|❌)" | grep -v "helpers"
|
||||
```
|
||||
|
||||
Expected: 5 passing `buildPacketsUrl` tests.
|
||||
|
||||
- [ ] **Step 5: Write Playwright test (add to test-e2e-playwright.js, inside the deep-linking group)**
|
||||
|
||||
```javascript
|
||||
// Test: packets timeWindow deep link
|
||||
await test('Packets timeWindow deep link restores dropdown', async () => {
|
||||
await page.goto(BASE + '#/packets?timeWindow=60', { waitUntil: 'domcontentloaded' });
|
||||
await page.waitForSelector('#fTimeWindow', { timeout: 8000 });
|
||||
const val = await page.$eval('#fTimeWindow', el => el.value);
|
||||
assert(val === '60', `Expected timeWindow dropdown = 60, got: ${val}`);
|
||||
const url = page.url();
|
||||
assert(url.includes('timeWindow=60'), `URL should still contain timeWindow=60, got: ${url}`);
|
||||
});
|
||||
|
||||
// Test: timeWindow change updates URL
|
||||
await test('Packets timeWindow change updates URL', async () => {
|
||||
await page.goto(BASE + '#/packets', { waitUntil: 'domcontentloaded' });
|
||||
await page.waitForSelector('#fTimeWindow', { timeout: 8000 });
|
||||
await page.selectOption('#fTimeWindow', '30');
|
||||
await page.waitForTimeout(300);
|
||||
const url = page.url();
|
||||
assert(url.includes('timeWindow=30'), `URL should contain timeWindow=30 after change, got: ${url}`);
|
||||
});
|
||||
```
|
||||
|
||||
- [ ] **Step 6: Run full test suite**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js
|
||||
```
|
||||
|
||||
Expected: all tests pass.
|
||||
|
||||
- [ ] **Step 7: Commit**
|
||||
|
||||
```bash
|
||||
git add public/packets.js test-frontend-helpers.js test-e2e-playwright.js
|
||||
git commit -m "feat: deep link packets timeWindow and region filter (#536)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 4: channels.js — node panel deep linking
|
||||
|
||||
**Files:**
|
||||
- Modify: `public/channels.js`
|
||||
|
||||
No unit tests needed for this task — the URL manipulation is side-effectful (DOM + History API). Playwright tests cover it.
|
||||
|
||||
- [ ] **Step 1: Write the Playwright test (add to test-e2e-playwright.js, inside the deep-linking group)**
|
||||
|
||||
```javascript
|
||||
// Test: channels selected channel survives refresh (already implemented, verify it still works)
|
||||
await test('Channels channel selection is URL-addressable', async () => {
|
||||
await page.goto(BASE + '#/channels', { waitUntil: 'domcontentloaded' });
|
||||
await page.waitForSelector('.ch-item', { timeout: 8000 }).catch(() => null);
|
||||
const firstChannel = await page.$('.ch-item');
|
||||
if (firstChannel) {
|
||||
await firstChannel.click();
|
||||
await page.waitForTimeout(500);
|
||||
const url = page.url();
|
||||
assert(url.includes('#/channels/') || url.includes('#/channels'), `URL should reflect channel selection, got: ${url}`);
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update `showNodeDetail` to write `?node=` to the URL**
|
||||
|
||||
In `channels.js`, in `showNodeDetail` (around line 171), add the URL update right after `selectedNode = name;`:
|
||||
|
||||
```javascript
|
||||
async function showNodeDetail(name) {
|
||||
_nodePanelTrigger = document.activeElement;
|
||||
if (_focusTrapCleanup) { _focusTrapCleanup(); _focusTrapCleanup = null; }
|
||||
const node = await lookupNode(name);
|
||||
selectedNode = name;
|
||||
var _chBase = selectedHash ? '#/channels/' + encodeURIComponent(selectedHash) : '#/channels';
|
||||
history.replaceState(null, '', _chBase + '?node=' + encodeURIComponent(name));
|
||||
|
||||
let panel = document.getElementById('chNodePanel');
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Update `closeNodeDetail` to strip `?node=` from the URL**
|
||||
|
||||
In `closeNodeDetail` (around line 232), add URL restore right after `selectedNode = null;`:
|
||||
|
||||
```javascript
|
||||
function closeNodeDetail() {
|
||||
if (_focusTrapCleanup) { _focusTrapCleanup(); _focusTrapCleanup = null; }
|
||||
const panel = document.getElementById('chNodePanel');
|
||||
if (panel) panel.classList.remove('open');
|
||||
selectedNode = null;
|
||||
var _chRestoreUrl = selectedHash ? '#/channels/' + encodeURIComponent(selectedHash) : '#/channels';
|
||||
history.replaceState(null, '', _chRestoreUrl);
|
||||
if (_nodePanelTrigger && typeof _nodePanelTrigger.focus === 'function') {
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Read `?node=` on init and auto-open panel**
|
||||
|
||||
In `channels.js` `init` (line 316), add URL param reading at the very top of the function (before `app.innerHTML`):
|
||||
|
||||
```javascript
|
||||
function init(app, routeParam) {
|
||||
var _initUrlParams = new URLSearchParams(location.hash.split('?')[1] || '');
|
||||
var _pendingNode = _initUrlParams.get('node');
|
||||
|
||||
app.innerHTML = `<div class="ch-layout">
|
||||
```
|
||||
|
||||
Then update the `loadChannels().then(...)` call (around line 350) to auto-open the node panel:
|
||||
|
||||
```javascript
|
||||
loadChannels().then(async function () {
|
||||
if (routeParam) await selectChannel(routeParam);
|
||||
if (_pendingNode) showNodeDetail(_pendingNode);
|
||||
});
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Run full test suite**
|
||||
|
||||
```bash
|
||||
node test-frontend-helpers.js
|
||||
```
|
||||
|
||||
Expected: all tests pass (no channels unit tests, but regression tests still pass).
|
||||
|
||||
- [ ] **Step 6: Commit**
|
||||
|
||||
```bash
|
||||
git add public/channels.js
|
||||
git commit -m "feat: deep link channels node panel via ?node= (#536)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Run E2E Playwright tests
|
||||
|
||||
- [ ] **Step 1: Start the local server**
|
||||
|
||||
```bash
|
||||
cd cmd/server && go run . &
|
||||
```
|
||||
|
||||
Wait for it to be ready (check `http://localhost:3000`).
|
||||
|
||||
- [ ] **Step 2: Run Playwright tests**
|
||||
|
||||
```bash
|
||||
node test-e2e-playwright.js
|
||||
```
|
||||
|
||||
Expected: all tests pass including the new deep-linking group.
|
||||
|
||||
- [ ] **Step 3: If any deep-linking test fails, debug**
|
||||
|
||||
Common failures:
|
||||
- Selector `.node-tab.active` not found: check that nodes.js correctly reads `?tab=` from URL before rendering
|
||||
- `#fTimeWindow` value wrong: check that `savedTimeWindowMin` is overridden before the DOM is built
|
||||
- URL doesn't update: check `history.replaceState` calls in the change handlers
|
||||
|
||||
- [ ] **Step 4: Final commit (if any fixes needed)**
|
||||
|
||||
```bash
|
||||
git add public/nodes.js public/packets.js public/channels.js
|
||||
git commit -m "fix: deep linking E2E adjustments (#536)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Self-Review
|
||||
|
||||
**Spec coverage check:**
|
||||
- ✅ P1: Nodes role tab → Task 2
|
||||
- ✅ P1: Packets time window → Task 3
|
||||
- ✅ P1: Packets region filter → Task 3 (depends on Task 1)
|
||||
- ✅ P1: Channels selected channel → Already implemented via `#/channels/{hash}` (verified in channels.js init line 351)
|
||||
- ✅ P1: Channels node panel → Task 4
|
||||
- ✅ P2+ items → explicitly out of scope per issue
|
||||
|
||||
**Architecture note:** The router in `app.js` strips the query string at line 422 (`const route = hash.split('?')[0]`) before computing `basePage` and `routeParam`. Therefore `#/nodes?tab=repeater` gives `routeParam=null` (not `?tab=repeater`). All pages must read URL params from `location.hash` directly, not from `routeParam`. This is the established pattern in `analytics.js` and `nodes.js` (section scroll).
|
||||
|
||||
**Placeholder scan:** No TBDs, no "implement later", all code blocks complete. ✅
|
||||
|
||||
**Type consistency:**
|
||||
- `buildNodesQuery(tab, searchStr)` — used consistently in `updateNodesUrl()` and in tests ✅
|
||||
- `buildPacketsUrl(timeWindowMin, regionParam)` — used consistently in `updatePacketsUrl()` and in tests ✅
|
||||
- `RegionFilter.setSelected(codesArray)` — defined in Task 1, used in Task 3 ✅
|
||||
@@ -1,204 +0,0 @@
|
||||
# Scope Stats Page — Design Spec
|
||||
|
||||
**Issue**: Kpa-clawbot/CoreScope#899
|
||||
**Date**: 2026-04-23
|
||||
**Branch target**: `master`
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Add a dedicated **Scopes** page showing scope/region statistics for MeshCore transport-route packets. Scope filtering in MeshCore uses `TRANSPORT_FLOOD` (route_type 0) and `TRANSPORT_DIRECT` (route_type 3) packets that carry two 16-bit transport codes. Code1 ≠ `0000` means the packet is region-scoped.
|
||||
|
||||
Feature 3 from the issue (default scope per client via advert) is **not implemented** — the advert format has no scope field in the current firmware.
|
||||
|
||||
---
|
||||
|
||||
## How Scopes Work (Firmware)
|
||||
|
||||
Transport code derivation (authoritative source: `meshcore-dev/MeshCore`):
|
||||
|
||||
```
|
||||
key = SHA256("#regionname")[:16] // TransportKeyStore::getAutoKeyFor
|
||||
Code1 = HMAC-SHA256(key, type || payload) // TransportKey::calcTransportCode, 2-byte output
|
||||
```
|
||||
|
||||
Code1 is a **per-message** HMAC — the same region produces a different Code1 for every message. Identifying a region from Code1 requires knowing the region name in advance and recomputing the HMAC.
|
||||
|
||||
`Code1 = 0000` is the "no scope" sentinel (the value `FFFF` is also reserved). Packets with route_type 1 or 2 (plain FLOOD/DIRECT) carry no transport codes.
|
||||
|
||||
---
|
||||
|
||||
## Config
|
||||
|
||||
Add `hashRegions` to the ingestor `Config` struct in `cmd/ingestor/config.go`, mirroring `hashChannels`:
|
||||
|
||||
```json
|
||||
"hashRegions": ["#belgium", "#eu", "#brussels"]
|
||||
```
|
||||
|
||||
Normalization (same rules as `hashChannels`):
|
||||
- Trim whitespace
|
||||
- Prepend `#` if missing
|
||||
- Skip empty entries
|
||||
|
||||
---
|
||||
|
||||
## Ingestor Changes
|
||||
|
||||
### Key derivation (`loadRegionKeys`)
|
||||
|
||||
```go
|
||||
func loadRegionKeys(cfg *Config) map[string][]byte {
|
||||
// key = first 16 bytes of SHA256("#regionname")
|
||||
}
|
||||
```
|
||||
|
||||
Returns `map[string][]byte` (region name → 16-byte HMAC key). Called once at startup, stored on the `Store`.
|
||||
|
||||
### Decoder: expose raw payload bytes
|
||||
|
||||
Add `PayloadRaw []byte` to `DecodedPacket` in `cmd/ingestor/decoder.go`. Populated from the raw `buf` slice at the payload offset — zero-copy slice, no allocation. This is the **encrypted** payload bytes, matching what the firmware feeds into `calcTransportCode`.
|
||||
|
||||
### At-ingest region matching
|
||||
|
||||
In `BuildPacketData`:
|
||||
- Skip if `route_type` not in `{0, 3}` → `scope_name` stays `nil`
|
||||
- If `Code1 == "0000"` → `scope_name = nil` (unscoped transport, no scope involvement)
|
||||
- If `Code1 != "0000"` → try each region key:
|
||||
```
|
||||
HMAC-SHA256(key, payloadType_byte || PayloadRaw) → first 2 bytes as uint16
|
||||
```
|
||||
First match → `scope_name = "#regionname"`. No match → `scope_name = ""` (unknown scope).
|
||||
|
||||
Add `ScopeName *string` to `PacketData`.
|
||||
|
||||
### MQTT-sourced packets (DM / CHAN paths in main.go)
|
||||
|
||||
These are injected directly without going through `BuildPacketData`. They use `route_type = 1` (FLOOD), so they are never transport-route packets. No scope matching needed for these paths.
|
||||
|
||||
---
|
||||
|
||||
## Database
|
||||
|
||||
### Migration
|
||||
|
||||
```sql
|
||||
ALTER TABLE transmissions ADD COLUMN scope_name TEXT DEFAULT NULL;
|
||||
CREATE INDEX idx_tx_scope_name ON transmissions(scope_name) WHERE scope_name IS NOT NULL;
|
||||
```
|
||||
|
||||
### Column semantics
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `NULL` | Either: non-transport-route packet (route_type 1/2), or transport-route with Code1=0000 |
|
||||
| `""` (empty string) | Transport-route, Code1 ≠ 0000, but no configured region matched |
|
||||
| `"#belgium"` | Matched named region |
|
||||
|
||||
The API stats queries resolve the NULL ambiguity by always filtering `route_type IN (0, 3)` first:
|
||||
- `unscoped` count = `route_type IN (0,3) AND scope_name IS NULL`
|
||||
- `scoped` count = `route_type IN (0,3) AND scope_name IS NOT NULL`
|
||||
|
||||
### Backfill
|
||||
|
||||
On migration, re-decode `raw_hex` for all rows where `route_type IN (0, 3)` and `scope_name IS NULL`. Run the same HMAC matching logic. Rows with `Code1 = 0000` remain `NULL`.
|
||||
|
||||
The backfill runs in the existing migration framework in `cmd/ingestor/db.go`. If no regions are configured, backfill is skipped.
|
||||
|
||||
---
|
||||
|
||||
## API
|
||||
|
||||
### `GET /api/scope-stats`
|
||||
|
||||
**Query param**: `window` — one of `1h`, `24h` (default), `7d`
|
||||
|
||||
**Time-series bucket sizes**:
|
||||
| Window | Bucket |
|
||||
|--------|--------|
|
||||
| `1h` | 5 min |
|
||||
| `24h` | 1 hour |
|
||||
| `7d` | 6 hours |
|
||||
|
||||
**Response**:
|
||||
```json
|
||||
{
|
||||
"window": "24h",
|
||||
"summary": {
|
||||
"transportTotal": 1240,
|
||||
"scoped": 890,
|
||||
"unscoped": 350,
|
||||
"unknownScope": 42
|
||||
},
|
||||
"byRegion": [
|
||||
{ "name": "#belgium", "count": 612 },
|
||||
{ "name": "#eu", "count": 236 }
|
||||
],
|
||||
"timeSeries": [
|
||||
{ "t": "2026-04-23T10:00:00Z", "scoped": 45, "unscoped": 18 },
|
||||
{ "t": "2026-04-23T11:00:00Z", "scoped": 51, "unscoped": 22 }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
- `transportTotal` = `scoped + unscoped` (transport-route packets only)
|
||||
- `scoped` = Code1 ≠ 0000 (named + unknown)
|
||||
- `unscoped` = transport-route with Code1 = 0000
|
||||
- `unknownScope` = scoped but no region name matched (subset of `scoped`)
|
||||
- `byRegion` sorted by count descending, excludes unknown
|
||||
- `timeSeries` covers the full window at the bucket granularity
|
||||
|
||||
Route: `GET /api/scope-stats` registered in `cmd/server/routes.go`.
|
||||
No auth required (same as other read endpoints).
|
||||
TTL cache: 30 seconds (heavier query than `/api/stats`).
|
||||
|
||||
---
|
||||
|
||||
## Frontend
|
||||
|
||||
### Navigation
|
||||
|
||||
Add nav link between Channels and Nodes in `public/index.html`:
|
||||
```html
|
||||
<a href="#/scopes" class="nav-link" data-route="scopes">Scopes</a>
|
||||
```
|
||||
|
||||
### `public/scopes.js`
|
||||
|
||||
Three sections on the page:
|
||||
|
||||
**1. Summary cards** (reuse existing card CSS pattern from home/analytics pages)
|
||||
- Transport total, Scoped, Unscoped, Unknown scope
|
||||
- Each card shows count + percentage of transport total
|
||||
|
||||
**2. Per-region table**
|
||||
Columns: Region, Messages, % of Scoped
|
||||
Sorted by count descending. Last row: "Unknown scope" (italic) if unknownScope > 0.
|
||||
Shows "No regions configured" message if `byRegion` is empty and `unknownScope = 0`.
|
||||
|
||||
**3. Time-series chart**
|
||||
- Window selector: `1h / 24h / 7d` (default 24h)
|
||||
- Two lines: **Scoped** (blue) and **Unscoped** (grey)
|
||||
- Uses the same lightweight canvas chart pattern as other pages (no external chart lib)
|
||||
|
||||
### Cache buster
|
||||
|
||||
`scopes.js` added to the `__BUST__` entries in `index.html` in the same commit.
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
- Unit tests for `loadRegionKeys`: normalization, key bytes match firmware SHA256 derivation
|
||||
- Unit tests for HMAC matching: known Code1 value computed from firmware logic, verified against Go implementation
|
||||
- Integration test: ingest a synthetic transport-route packet with a known region, assert `scope_name` column is set correctly
|
||||
- API test: `GET /api/scope-stats` returns correct summary counts against fixture DB
|
||||
|
||||
---
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- Feature 3 (default scope per client via advert) — firmware has no advert scope field
|
||||
- Drill-down from region row to filtered packet list (deferred)
|
||||
- Private regions (`$`-prefixed) — use secret keys not publicly derivable
|
||||
@@ -1,162 +0,0 @@
|
||||
# v3.4.2 Manual Validation Checklist
|
||||
|
||||
**Tester:** _______________
|
||||
**Staging:** http://20.109.157.39
|
||||
**Prod:** https://analyzer.00id.net (READ ONLY — do not deploy until staging passes)
|
||||
**Browser:** Chrome + Firefox + Safari (mobile for responsive items)
|
||||
**Time estimate:** ~45 minutes
|
||||
|
||||
---
|
||||
|
||||
## 🔴 HIGH RISK — Test First
|
||||
|
||||
### 1. Zero-hop hash size display (#649, #653)
|
||||
- [ ] Go to Packets page, find a DIRECT advert (route_type=2, 0 hops)
|
||||
- [ ] Open packet detail — hash size should say "Unknown (zero-hop)" or be hidden, NOT "1 byte"
|
||||
- [ ] Check "Path Length" field shows `hash_count=0 (direct advert)`
|
||||
- [ ] Find a FLOOD advert with 0 hops — it SHOULD show hash size (this is different from DIRECT)
|
||||
|
||||
### 2. TRACE packet real path (#651, #656)
|
||||
- [ ] Send a trace from your companion
|
||||
- [ ] Watch Live map — the animated dot should only travel along completed hops (solid line)
|
||||
- [ ] Unreached hops should show as dashed/ghosted line at reduced opacity
|
||||
- [ ] If trace completes fully, entire path should be solid
|
||||
- [ ] Ghost line should auto-clean after ~10 seconds
|
||||
|
||||
### 3. "Paths through this node" accuracy (#655, #658)
|
||||
- [ ] Go to: http://20.109.157.39/#/nodes/c0dedad4208acb6cbe44b848943fc6d3c5d43cf38a21e48b43826a70862980e4
|
||||
- [ ] Check "Packets through this node" — packets should actually have this node in their path
|
||||
- [ ] Compare with a node that shares a 2-char prefix (e.g. C0ffee SF) — they should show DIFFERENT packets
|
||||
- [ ] Spot-check 3-4 packets: click through, verify path contains the node
|
||||
|
||||
### 4. Hash Stats "By Repeaters" (#652, #654)
|
||||
- [ ] Go to Analytics → Hash Stats
|
||||
- [ ] "By Repeaters" section should only show repeater-role nodes
|
||||
- [ ] Compare count in "Multi-Byte Hash Adopters" vs "By Repeaters" — adopters may include companions, repeaters section should not
|
||||
- [ ] Check that companions/rooms/sensors are excluded from the repeater distribution
|
||||
|
||||
### 5. Noise floor column chart (#600, #659)
|
||||
- [ ] Go to Analytics → RF Health
|
||||
- [ ] Noise floor chart should show vertical color-coded bars, NOT a line
|
||||
- [ ] Green bars (< -100 dBm), yellow (-100 to -85), red (≥ -85)
|
||||
- [ ] Hover over a bar — tooltip should show exact dBm + timestamp
|
||||
- [ ] Check with only 1 observer selected — chart should still render (division by zero edge case)
|
||||
- [ ] Reboot markers (if any) should show as vertical dashed lines
|
||||
|
||||
### 6. Async backfill on startup
|
||||
- [ ] SSH to staging: `ssh -i ~/.ssh/id_ed25519 runner@20.109.157.39`
|
||||
- [ ] `docker restart corescope-staging-go`
|
||||
- [ ] Within 30 seconds, hit `curl http://localhost:82/api/stats` — should return data (not hang)
|
||||
- [ ] Check `backfilling` and `backfillProgress` fields in stats response
|
||||
- [ ] Server should be serving HTTP while backfill runs in background
|
||||
|
||||
---
|
||||
|
||||
## 🟡 MEDIUM RISK — Features
|
||||
|
||||
### 7. Distance unit preference (#621, #646)
|
||||
- [ ] Go to Customizer → Display tab
|
||||
- [ ] Change distance unit to "mi" — all distances should show in miles
|
||||
- [ ] Change to "km" — all distances should show in km
|
||||
- [ ] Change to "auto" — should use locale (US = miles, EU = km)
|
||||
- [ ] Check Analytics page distances update after customizer change (no page reload needed)
|
||||
- [ ] Check Node detail → Neighbors table distances
|
||||
- [ ] Very small distances (<0.1 mi) should show in feet, not "0.0 mi"
|
||||
|
||||
### 8. Panel corner toggle (#608, #657)
|
||||
- [ ] Go to Live map page
|
||||
- [ ] Each panel (feed, legend, node detail) should have a small corner-toggle button
|
||||
- [ ] Click the button — panel should snap to next corner (TL → TR → BR → BL)
|
||||
- [ ] Refresh page — panel positions should persist (localStorage)
|
||||
- [ ] Move two panels to same corner — collision avoidance should skip to next free corner
|
||||
- [ ] On mobile viewport (<768px) — toggle buttons should be hidden
|
||||
|
||||
### 9. Deep linking (#536, #618)
|
||||
- [ ] Navigate to Nodes page, click a node → URL should update with pubkey hash
|
||||
- [ ] Copy URL, open in new tab → should land on same node
|
||||
- [ ] Apply packet filters → URL hash should include filter params
|
||||
- [ ] Channels page: select a node → URL should reflect selection
|
||||
- [ ] Analytics tabs: switch tabs → URL should include tab name
|
||||
- [ ] Share a deep link with someone — they should see the same view
|
||||
|
||||
### 10. Sortable tables (#620, #638, #639)
|
||||
- [ ] Nodes list: click column headers — should sort ascending/descending
|
||||
- [ ] Sort indicator (arrow) should be visible on active column
|
||||
- [ ] Node detail → Neighbors table: sortable
|
||||
- [ ] Node detail → Observers table: sortable
|
||||
- [ ] Packets table: sortable by column headers
|
||||
|
||||
### 11. Channel color highlighting (#271, #607, #611)
|
||||
- [ ] Go to Channels page
|
||||
- [ ] Assign a color to a channel using the color picker
|
||||
- [ ] Feed rows should highlight with that color
|
||||
- [ ] Change color — should update immediately
|
||||
- [ ] Refresh — color assignment should persist
|
||||
|
||||
### 12. Collapsible panels (#606)
|
||||
- [ ] Live map: panels should have collapse/expand toggle
|
||||
- [ ] Collapsed panel should show just the header
|
||||
- [ ] State should persist across page navigations
|
||||
|
||||
### 13. Mobile accessibility (#630, #633)
|
||||
- [ ] Open staging on phone (or Chrome DevTools mobile emulation)
|
||||
- [ ] Touch targets should be at least 44×44px
|
||||
- [ ] Channel color picker should work on mobile
|
||||
- [ ] No horizontal scroll on any page
|
||||
- [ ] ARIA labels present on interactive elements (inspect with accessibility tools)
|
||||
|
||||
### 14. Map byte-size filter (#565, #568)
|
||||
- [ ] Go to Map page
|
||||
- [ ] Find the byte-size filter control
|
||||
- [ ] Filter by packet size — map should update to show only matching packets
|
||||
- [ ] Clear filter — all packets should return
|
||||
|
||||
### 15. API key security (#532, #628)
|
||||
- [ ] Try accessing a write endpoint without API key — should be blocked
|
||||
- [ ] Try with a weak key (e.g., "test", "admin") — should be rejected at startup
|
||||
- [ ] Check staging logs for API key warning: `docker logs corescope-staging-go 2>&1 | grep -i "apiKey\|api_key\|security"`
|
||||
|
||||
### 16. OpenAPI/Swagger (#530, #632)
|
||||
- [ ] Hit http://20.109.157.39/api/spec — should return valid OpenAPI 3.0 spec
|
||||
- [ ] Hit http://20.109.157.39/api/docs — should show Swagger UI
|
||||
- [ ] Try an endpoint from Swagger UI — should work
|
||||
|
||||
---
|
||||
|
||||
## 🟢 LOW RISK — Verify Quickly
|
||||
|
||||
### 17. View Route on Map button
|
||||
- [ ] Go to any packet detail page
|
||||
- [ ] Click "View Route on Map" — should navigate to map with route highlighted
|
||||
|
||||
### 18. og-image compression
|
||||
- [ ] Check page source or network tab — og-image.png should be < 300KB (was 1.1MB)
|
||||
|
||||
### 19. Prefix Tool
|
||||
- [ ] Analytics → Prefix Tool tab should load
|
||||
- [ ] Should show collision data
|
||||
|
||||
### 20. License
|
||||
- [ ] Check repo footer/LICENSE — should be GPL v3
|
||||
|
||||
### 21. Docker DISABLE_CADDY
|
||||
- [ ] (If testable) Set DISABLE_CADDY=true — Caddy should not start
|
||||
|
||||
### 22. Region filter on RF Health
|
||||
- [ ] RF Health tab: change region filter — charts should update
|
||||
|
||||
---
|
||||
|
||||
## 🏁 Sign-off
|
||||
|
||||
| Section | Status | Notes |
|
||||
|---------|--------|-------|
|
||||
| High risk (1-6) | ☐ | |
|
||||
| Medium risk (7-16) | ☐ | |
|
||||
| Low risk (17-22) | ☐ | |
|
||||
| **Overall** | ☐ | |
|
||||
|
||||
**Tested by:** _______________
|
||||
**Date:** _______________
|
||||
**Staging version:** `curl -s http://20.109.157.39/api/stats | jq .version`
|
||||
**Ready for release:** ☐ Yes / ☐ No — blockers: _______________
|
||||
@@ -1,309 +0,0 @@
|
||||
# v3.4.2 Release Test Plan
|
||||
|
||||
**Scope:** 90 commits since v3.4.1 (84 files, +14,931 / -1,005)
|
||||
**Categories:** 19 perf, 19 feat, 18 fix, 15 docs, 3 chore, 1 test, 1 refactor, 1 ci
|
||||
**Date:** 2026-04-08
|
||||
|
||||
---
|
||||
|
||||
## A. Automated Tests — Verify All Pass
|
||||
|
||||
### Go Backend
|
||||
```bash
|
||||
cd cmd/server && go test -race -count=1 ./...
|
||||
cd cmd/ingestor && go test -race -count=1 ./...
|
||||
```
|
||||
|
||||
**Test files (27 total):**
|
||||
|
||||
| File | Tests For |
|
||||
|------|-----------|
|
||||
| `cmd/server/decoder_test.go` | Hash size zero-hop, TRACE hopsCompleted, transport direct |
|
||||
| `cmd/server/backfill_async_test.go` | **NEW** — Async chunked backfill |
|
||||
| `cmd/server/eviction_test.go` | Memory eviction with runtime heap stats |
|
||||
| `cmd/server/apikey_security_test.go` | **NEW** — Weak/default API key rejection |
|
||||
| `cmd/server/openapi_test.go` | **NEW** — OpenAPI spec generation |
|
||||
| `cmd/server/routes_test.go` | Batch observations endpoint, subpaths-bulk, expand=observations |
|
||||
| `cmd/server/cache_invalidation_test.go` | cacheTTL config wiring |
|
||||
| `cmd/server/config_knobs_test.go` | cacheTTLSec helper |
|
||||
| `cmd/server/helpers_test.go` | constantTimeEqual, IsWeakAPIKey |
|
||||
| `cmd/server/obs_dedup_test.go` | UniqueObserverCount tracking |
|
||||
| `cmd/server/neighbor_*.go` (4 files) | Neighbor graph, affinity, persistence |
|
||||
| `cmd/server/perfstats_race_test.go` | Perf stats concurrency |
|
||||
| `cmd/server/resolve_context_test.go` | Resolved path filtering |
|
||||
| `cmd/server/advert_pubkey_test.go` | Advert pubkey tracking |
|
||||
| `cmd/server/db_test.go` | SQLite operations |
|
||||
| `cmd/server/config_test.go` | Config loading |
|
||||
| `cmd/server/coverage_test.go` | Coverage helpers |
|
||||
| `cmd/server/parity_test.go` | Go/JS decoder parity |
|
||||
| `cmd/server/websocket_test.go` | WebSocket broadcast |
|
||||
| `cmd/ingestor/decoder_test.go` | Ingestor decoder (hash size zero-hop) |
|
||||
| `cmd/ingestor/db_test.go` | Ingestor DB writes |
|
||||
| `cmd/ingestor/config_test.go` | Ingestor config |
|
||||
| `cmd/ingestor/main_test.go` | Ingestor entry |
|
||||
| `cmd/ingestor/coverage_boost_test.go` | Coverage helpers |
|
||||
|
||||
### Frontend Unit Tests
|
||||
```bash
|
||||
node test-packet-filter.js
|
||||
node test-aging.js
|
||||
node test-frontend-helpers.js
|
||||
node test-table-sort.js # NEW — shared table sort utility
|
||||
node test-channel-colors.js # NEW — channel color model
|
||||
node test-panel-corner.js # NEW — panel corner toggle
|
||||
node test-packets.js # NEW — packets page logic
|
||||
node test-hop-resolver-affinity.js
|
||||
node test-customizer-v2.js
|
||||
node test-live.js
|
||||
node test-live-dedup.js
|
||||
```
|
||||
|
||||
### E2E / Playwright
|
||||
```bash
|
||||
BASE_URL=http://localhost:13581 node test-e2e-playwright.js
|
||||
```
|
||||
|
||||
**Expected:** All existing tests pass + new tests added for sortable tables, deep linking, collapsible panels.
|
||||
|
||||
---
|
||||
|
||||
## B. Manual Browser Verification
|
||||
|
||||
### B1. HIGH RISK — Data Correctness
|
||||
|
||||
| # | Feature | Page | What to Check |
|
||||
|---|---------|------|---------------|
|
||||
| 1 | Hash size zero-hop | Packets detail | Find a direct (route_type=0) packet → hash_size should show 0, not a bogus computed value |
|
||||
| 2 | TRACE hopsCompleted | Packets detail / Live map | Find a TRACE packet → verify `hopsCompleted` shows in decoded JSON, live map shows real path length vs intended |
|
||||
| 3 | Transport direct hash size | Packets detail | Find route_type=RouteTransportDirect packet → hash_size=0 |
|
||||
| 4 | resolved_path filtering | Node detail → Paths tab | Verify path-hop candidates use resolved_path, no prefix collision false positives |
|
||||
| 5 | Hash stats repeater filter | Analytics → Hash Issues | "By Repeaters" should only show nodes with repeater role, not companions/sensors |
|
||||
| 6 | Async chunked backfill | Server startup | Start server with large DB → verify HTTP serves within 2 minutes, `X-CoreScope-Status: backfilling` header present, then transitions to `ready` |
|
||||
| 7 | Memory eviction (heap stats) | Admin/stats | Verify `/api/stats` shows realistic memory numbers from runtime heap, not the old estimation |
|
||||
| 8 | Distance/subpath/path-hop indexes | Analytics → Distances, Subpaths | Verify analytics data matches v3.4.1 output (no missing or extra entries) |
|
||||
| 9 | cacheTTL config wiring | Config | Set `cacheTTL.analyticsHashSizes: 300` in config → verify collision cache respects it |
|
||||
|
||||
### B2. MEDIUM RISK — User-Facing Features
|
||||
|
||||
| # | Feature | Page | What to Check |
|
||||
|---|---------|------|---------------|
|
||||
| 10 | Distance unit preference | Nodes detail, Map | Toggle km/mi/auto in settings → distances update throughout UI |
|
||||
| 11 | Panel corner toggle | Live page | Click corner toggle → panel moves to opposite corner, persists on reload |
|
||||
| 12 | Noise floor column chart | Analytics → RF | Verify column chart renders with color-coded thresholds, hover shows values |
|
||||
| 13 | Deep linking UI states | All pages | Navigate to `#/nodes?tab=neighbors`, `#/packets?observer=X`, `#/channels?node=Y` → correct state loads. Copy URL, open in new tab → same state |
|
||||
| 14 | Sortable tables | Nodes list, Neighbors, Observers | Click column headers → sort asc/desc, indicator arrow shows, persists correctly |
|
||||
| 15 | Channel color highlighting | Channels, Live feed | Assign color to channel → feed rows show that color, persists on reload |
|
||||
| 16 | Mobile accessibility | All pages (phone viewport) | Touch targets ≥44px, ARIA labels present, small viewport doesn't overflow |
|
||||
| 17 | Collapsible panels | Live map | Collapse/expand panels, medium breakpoint auto-collapses, state persists |
|
||||
| 18 | Byte-size map filter | Map page | Filter by byte size → markers update correctly |
|
||||
| 19 | OpenAPI/Swagger | `/api/spec`, `/api/docs` | Spec loads valid JSON, Swagger UI renders and all endpoints are documented |
|
||||
| 20 | API key rejection | Protected endpoints | Send weak key (e.g. "changeme", "test123") → 403 forbidden |
|
||||
| 21 | Channel color picker mobile | Channels (phone viewport) | Color picker usable on touch, doesn't overflow |
|
||||
| 22 | RF Health dashboard | Analytics → RF Health | Observer metrics grid, airtime charts, battery charts, error rate, region filter |
|
||||
| 23 | Prefix Tool tab | Analytics → Prefix Tool | Renders correctly, collision data consistent with Hash Issues |
|
||||
| 24 | View Route on Map | Packet detail page | Button works and shows route on map |
|
||||
|
||||
### B3. LOWER RISK — Performance (Verify No Regressions)
|
||||
|
||||
| # | Feature | Page | What to Check |
|
||||
|---|---------|------|---------------|
|
||||
| 25 | Incremental DOM diff | Packets (30K+) | Virtual scroll renders smoothly, no visible flicker |
|
||||
| 26 | Coalesced WS renders | Live page | Rapid packets don't cause frame drops (rAF coalescing) |
|
||||
| 27 | Marker reposition on zoom | Map | Zoom/resize → markers move smoothly, no full rebuild flash |
|
||||
| 28 | Parallel replay fetches | Live → VCR | Replay loads quickly (parallel observation fetches) |
|
||||
| 29 | Batch observations API | Packets page (sort change) | Changing sort fetches observations in batch (network tab: 1 POST not N GETs) |
|
||||
| 30 | Client-side network status | Analytics | No separate API call for network status |
|
||||
| 31 | og-image compression | `/og-image.png` | Verify loads, ~235KB not ~1.1MB |
|
||||
|
||||
---
|
||||
|
||||
## C. API Regression Tests
|
||||
|
||||
Run against a local server with test-fixture DB:
|
||||
|
||||
```bash
|
||||
BASE=http://localhost:13581
|
||||
|
||||
# Core endpoints — verify response shape
|
||||
curl -s "$BASE/api/stats" | jq '.totalPackets, .backfilling, .backfillProgress'
|
||||
curl -s "$BASE/api/packets?limit=5" | jq '.packets[0] | keys'
|
||||
curl -s "$BASE/api/packets?limit=5&expand=observations" | jq '.packets[0].observations | length'
|
||||
curl -s "$BASE/api/nodes?limit=5" | jq '.[0] | keys'
|
||||
|
||||
# New endpoints
|
||||
curl -s -X POST "$BASE/api/packets/observations" \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"hashes":["test123"]}' | jq '.results | keys'
|
||||
|
||||
curl -s "$BASE/api/analytics/subpaths-bulk?hops=A,B&hops=B,C" | jq 'keys'
|
||||
|
||||
curl -s "$BASE/api/observers/metrics/summary" | jq 'type'
|
||||
curl -s "$BASE/api/spec" | jq '.openapi'
|
||||
curl -s "$BASE/api/docs" | head -5 # Should return HTML
|
||||
|
||||
# Backfill status header
|
||||
curl -sI "$BASE/api/stats" | grep X-CoreScope-Status
|
||||
|
||||
# API key rejection
|
||||
curl -s -H 'X-API-Key: changeme' "$BASE/api/debug/vars" | jq '.error'
|
||||
curl -s -H 'X-API-Key: test' "$BASE/api/debug/vars" | jq '.error'
|
||||
|
||||
# Existing endpoints — verify not broken
|
||||
curl -s "$BASE/api/analytics/rf?timeRange=24h" | jq 'keys'
|
||||
curl -s "$BASE/api/analytics/hash-sizes" | jq 'type'
|
||||
curl -s "$BASE/api/analytics/distances" | jq 'type'
|
||||
curl -s "$BASE/api/analytics/subpaths" | jq 'type'
|
||||
curl -s "$BASE/api/channels" | jq 'type'
|
||||
curl -s "$BASE/api/config/client" | jq 'keys'
|
||||
```
|
||||
|
||||
### Expected response shape changes from v3.4.1:
|
||||
- `/api/stats` now includes `backfilling` (bool) and `backfillProgress` (float 0-1)
|
||||
- `/api/packets` no longer strips observations by default (lazy via `ExpandObservations` flag) — verify `observations` key absent without `expand=observations`
|
||||
- Decoded packets with route_type=direct now have `hashSize: 0`
|
||||
- TRACE packets now have `path.hopsCompleted` field
|
||||
|
||||
---
|
||||
|
||||
## D. Performance Regression Tests
|
||||
|
||||
### D1. Server Startup Time
|
||||
```bash
|
||||
# Start server with production-size DB (~30K packets)
|
||||
# Measure time from process start to first successful HTTP response
|
||||
time curl -s http://localhost:13581/api/stats > /dev/null
|
||||
# Target: < 2 minutes (async backfill requirement)
|
||||
```
|
||||
|
||||
### D2. Go Benchmarks
|
||||
```bash
|
||||
cd cmd/server && go test -bench=. -benchmem -count=3
|
||||
```
|
||||
Key benchmarks to compare with v3.4.1 baseline:
|
||||
- `BenchmarkQueryPackets` — should not regress with new indexes
|
||||
- `BenchmarkEvictStale` — batch removal from secondary indexes
|
||||
- `BenchmarkGetStoreStats` — 2 concurrent queries vs 5 sequential
|
||||
- `BenchmarkIngestNew` — additional index maintenance overhead
|
||||
|
||||
### D3. Frontend Performance
|
||||
- Open Packets page with 30K+ packets → measure initial render time (DevTools Performance tab)
|
||||
- Scroll rapidly through virtual scroll → should maintain 60fps
|
||||
- Switch sort column on packets → single batch POST, not N+1 GETs
|
||||
- Open Analytics page → no redundant API calls in network tab
|
||||
|
||||
### D4. Memory Usage
|
||||
- After loading 30K packets, check `/api/stats` memory figure
|
||||
- Compare with v3.4.1 baseline (prefix map cap at 8 chars should reduce ~10x)
|
||||
- Verify eviction triggers at correct memory threshold using runtime heap stats
|
||||
|
||||
---
|
||||
|
||||
## E. Infrastructure / Deployment Tests
|
||||
|
||||
### E1. Docker Build
|
||||
```bash
|
||||
docker build -t corescope:test .
|
||||
docker run --rm -p 13581:13581 corescope:test
|
||||
# Verify: container starts, HTTP responds, WebSocket connects
|
||||
```
|
||||
|
||||
### E2. GHCR Publish (CI)
|
||||
- Verify CI publishes to `ghcr.io/kpa-clawbot/corescope`
|
||||
- Verify tags: `edge` (master), `vX.Y.Z` (release)
|
||||
|
||||
### E3. Staging Deploy
|
||||
```bash
|
||||
# Verify staging compose works with standard ports
|
||||
docker compose -f docker-compose.staging.yml up -d
|
||||
# Check: no 3GB memory limit, standard port binding
|
||||
```
|
||||
|
||||
### E4. DISABLE_CADDY
|
||||
```bash
|
||||
docker run --rm -e DISABLE_CADDY=true corescope:test
|
||||
# Verify: Caddy not started, Go server serves directly
|
||||
```
|
||||
|
||||
### E5. CI Pipeline
|
||||
- Verify consolidated pipeline: build → publish GHCR → deploy staging
|
||||
- Verify runs on `meshcore-runner-2`
|
||||
|
||||
---
|
||||
|
||||
## F. Edge Cases & Integration Tests
|
||||
|
||||
### F1. Cross-Feature Interactions
|
||||
| Scenario | Risk |
|
||||
|----------|------|
|
||||
| Deep link to sorted table → sort state matches URL params | Medium |
|
||||
| Channel color + deep link → color persists in linked URL | Medium |
|
||||
| Panel corner toggle + collapsible panels → both states persist independently | Low |
|
||||
| Distance unit pref + neighbor table sort by distance → sort uses correct unit | Medium |
|
||||
| Noise floor chart + region filter → chart respects filter | Medium |
|
||||
| Byte-size map filter + channel color highlighting → both active simultaneously | Low |
|
||||
|
||||
### F2. Data Correctness Edge Cases
|
||||
| Scenario | Risk |
|
||||
|----------|------|
|
||||
| Zero-hop TRACE packet (should NOT reset hashSize — TRACE exemption) | **High** |
|
||||
| Packet with all hops having same 2-char prefix → resolved_path filtering prevents false match | **High** |
|
||||
| Node that switches role (repeater → companion) → hash stats updates | Medium |
|
||||
| Backfill interrupted mid-chunk (server restart) → resumes or completes on next start | Medium |
|
||||
| Empty DB startup → no errors, backfill completes instantly | Low |
|
||||
| DB with 100K+ packets → async backfill doesn't OOM, progress reported | **High** |
|
||||
|
||||
### F3. Concurrency / Race Conditions
|
||||
| Scenario | Risk |
|
||||
|----------|------|
|
||||
| Concurrent API requests during backfill → no deadlock (lock ordering documented) | **High** |
|
||||
| Eviction running while analytics query in progress → no stale pointer panic | **High** |
|
||||
| Multiple WebSocket clients during high ingest rate → coalesced broadcasts don't drop | Medium |
|
||||
| `time.NewTicker` cleanup on graceful shutdown (replaced `time.Tick`) | Low |
|
||||
|
||||
### F4. API Key Security
|
||||
| Scenario | Expected |
|
||||
|----------|----------|
|
||||
| No API key configured → write endpoints disabled | 403 "write endpoints disabled" |
|
||||
| Weak key "changeme" → rejected even if configured | 403 "forbidden" |
|
||||
| Timing-safe comparison → no timing oracle | Constant-time via `crypto/subtle` |
|
||||
| Empty string key → rejected | 401 "unauthorized" |
|
||||
|
||||
### F5. Browser Compatibility
|
||||
- Test on Chrome, Firefox, Safari (latest)
|
||||
- Test on iOS Safari, Android Chrome
|
||||
- Verify touch targets on mobile (44px minimum)
|
||||
- Verify ARIA labels with screen reader
|
||||
|
||||
---
|
||||
|
||||
## G. Test Coverage Gaps — Action Items
|
||||
|
||||
| Gap | Priority | Action |
|
||||
|-----|----------|--------|
|
||||
| No automated test for distance unit preference rendering | Medium | Add Playwright test |
|
||||
| No automated test for noise floor column chart | Medium | Add Playwright test |
|
||||
| No automated test for deep link state restoration | **High** | Add Playwright tests for each deep-linkable state |
|
||||
| No automated test for channel color persistence | Medium | `test-channel-colors.js` covers model; need Playwright for UI |
|
||||
| No automated test for mobile viewport behavior | Medium | Add Playwright test with mobile viewport |
|
||||
| No automated test for backfill progress header | Low | Add to `routes_test.go` |
|
||||
| No automated test for `time.NewTicker` cleanup | Low | Add to graceful shutdown test |
|
||||
| Observer metrics endpoints not covered in route tests | Medium | Add to `routes_test.go` |
|
||||
| Subpaths-bulk endpoint needs test | Medium | Add to `routes_test.go` |
|
||||
| No load test for batch observations endpoint (200 hash limit) | Low | Add boundary test |
|
||||
|
||||
---
|
||||
|
||||
## H. Release Checklist
|
||||
|
||||
- [ ] All Go tests pass with `-race` flag
|
||||
- [ ] All frontend unit tests pass
|
||||
- [ ] Playwright E2E tests pass
|
||||
- [ ] Manual browser verification (Section B) complete
|
||||
- [ ] API regression tests (Section C) pass
|
||||
- [ ] Docker build succeeds
|
||||
- [ ] Staging deploy verified
|
||||
- [ ] No console errors on any page
|
||||
- [ ] Performance spot-checks (Section D) — no regressions
|
||||
- [ ] Coverage badges updated (backend ≥85%, frontend ≥42%)
|
||||
- [ ] CHANGELOG updated
|
||||
- [ ] Tag `v3.4.2` created
|
||||
@@ -98,22 +98,6 @@ How long (in hours) before a node is marked degraded or silent:
|
||||
| `retention.nodeDays` | `7` | Nodes not seen in N days move to inactive |
|
||||
| `retention.packetDays` | `30` | Packets older than N days are deleted daily |
|
||||
|
||||
> **Note:** Lowering retention does **not** immediately shrink the database file.
|
||||
> SQLite marks deleted pages as free but does not return them to the filesystem
|
||||
> unless [incremental auto-vacuum](database.md) is enabled. New databases created
|
||||
> after v0.x.x have auto-vacuum enabled automatically. Existing databases require
|
||||
> a one-time migration — see the [Database](database.md) guide.
|
||||
|
||||
## Database
|
||||
|
||||
| Field | Default | Description |
|
||||
|-------|---------|-------------|
|
||||
| `db.vacuumOnStartup` | `false` | Run a one-time full `VACUUM` on startup to enable incremental auto-vacuum (blocks for minutes on large DBs) |
|
||||
| `db.incrementalVacuumPages` | `1024` | Free pages returned to the OS after each retention reaper cycle |
|
||||
|
||||
See [Database](database.md) for details on SQLite auto-vacuum, WAL, and manual maintenance.
|
||||
See [#919](https://github.com/Kpa-clawbot/CoreScope/issues/919) for background.
|
||||
|
||||
## Channel decryption
|
||||
|
||||
| Field | Description |
|
||||
@@ -166,9 +150,6 @@ Lower values = fresher data but more server load.
|
||||
|-------|---------|-------------|
|
||||
| `packetStore.maxMemoryMB` | `1024` | Maximum RAM for in-memory packet store |
|
||||
| `packetStore.estimatedPacketBytes` | `450` | Estimated bytes per packet (for memory budgeting) |
|
||||
| `packetStore.retentionHours` | `0` | Only load packets younger than N hours on startup and keep them in memory. **Set this on any instance with a large DB.** `0` = unlimited (loads full DB history — causes OOM on cold start when the DB has hundreds of thousands of paths). Recommended: same as `retention.packetDays × 24` (e.g. `168` for 7 days). |
|
||||
|
||||
> **Warning:** Leaving `retentionHours` at `0` on a large database will cause the server to OOM-kill itself on every cold start. The full packet history is loaded into the subpath index at startup; a DB with ~280K paths produces ~13M index entries before the process is killed.
|
||||
|
||||
## Timestamps
|
||||
|
||||
@@ -195,19 +176,6 @@ Lower values = fresher data but more server load.
|
||||
|
||||
Provide cert and key paths to enable HTTPS.
|
||||
|
||||
## Geographic filtering
|
||||
|
||||
```json
|
||||
"geo_filter": {
|
||||
"polygon": [[51.55, 3.80], [51.55, 5.90], [50.65, 5.90], [50.65, 3.80]],
|
||||
"bufferKm": 20
|
||||
}
|
||||
```
|
||||
|
||||
Restricts ingestion and API responses to nodes within the polygon plus a buffer margin. Remove the block to disable filtering. Nodes with no GPS fix always pass through.
|
||||
|
||||
See [Geographic Filtering](geofilter.md) for the full guide including the visual polygon builder and the prune script for cleaning up historical data.
|
||||
|
||||
## Home page
|
||||
|
||||
The `home` section customizes the onboarding experience. See `config.example.json` for the full structure including `steps`, `checklist`, and `footerLinks`.
|
||||
|
||||
@@ -66,12 +66,6 @@ Click **Import JSON** and paste a previously exported theme. The customizer load
|
||||
|
||||
Click **Reset to Defaults** to restore all settings to the built-in defaults.
|
||||
|
||||
## GeoFilter Builder
|
||||
|
||||
The Export tab includes a **GeoFilter Builder →** link. Click it to open a Leaflet map where you can draw a polygon boundary for your deployment area. The tool generates a `geo_filter` block you can paste directly into `config.json`.
|
||||
|
||||
See [Geographic Filtering](geofilter.md) for full details on what geo filtering does and how to configure it.
|
||||
|
||||
## How it works
|
||||
|
||||
The customizer writes CSS custom properties (variables) to override the defaults. Exported JSON maps directly to the `theme`, `nodeColors`, `branding`, and `home` sections of [config.json](configuration.md).
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
# Database
|
||||
|
||||
CoreScope uses SQLite in WAL (Write-Ahead Log) mode for both the server
|
||||
(read-only) and ingestor (read-write).
|
||||
|
||||
## WAL mode
|
||||
|
||||
WAL mode allows concurrent reads while writes happen. It is set automatically
|
||||
at connection time via `PRAGMA journal_mode=WAL`. No operator action needed.
|
||||
|
||||
The WAL file (`meshcore.db-wal`) grows during writes and is checkpointed
|
||||
(merged back into the main DB) periodically and at clean shutdown.
|
||||
|
||||
## Auto-vacuum
|
||||
|
||||
By default, SQLite does not shrink the database file after `DELETE` operations.
|
||||
Deleted pages are marked free and reused by future writes, but the file size
|
||||
on disk stays the same. This is surprising when lowering retention settings.
|
||||
|
||||
### New databases
|
||||
|
||||
Databases created after this feature was added automatically have
|
||||
`PRAGMA auto_vacuum = INCREMENTAL`. After each retention reaper cycle,
|
||||
CoreScope runs `PRAGMA incremental_vacuum(N)` to return free pages to the OS.
|
||||
|
||||
### Existing databases
|
||||
|
||||
The `auto_vacuum` mode is stored in the database header and can only be changed
|
||||
by rewriting the entire file with `VACUUM`. CoreScope will **not** do this
|
||||
automatically — on large databases (5+ GB seen in the wild) it takes minutes
|
||||
and holds an exclusive lock.
|
||||
|
||||
**To migrate an existing database:**
|
||||
|
||||
1. At startup, CoreScope logs a warning:
|
||||
```
|
||||
[db] auto_vacuum=NONE — DB needs one-time VACUUM to enable incremental auto-vacuum.
|
||||
```
|
||||
2. **Ensure at least 2× the database file size in free disk space.** Full VACUUM
|
||||
creates a temporary copy of the entire file — on a near-full disk it will fail.
|
||||
3. Set `db.vacuumOnStartup: true` in your `config.json`:
|
||||
```json
|
||||
{
|
||||
"db": {
|
||||
"vacuumOnStartup": true
|
||||
}
|
||||
}
|
||||
```
|
||||
4. Restart CoreScope. The one-time `VACUUM` will run and block startup.
|
||||
5. After migration, remove or set `vacuumOnStartup: false` — it's not needed again.
|
||||
|
||||
### Configuration
|
||||
|
||||
| Field | Default | Description |
|
||||
|-------|---------|-------------|
|
||||
| `db.vacuumOnStartup` | `false` | One-time full VACUUM to enable incremental auto-vacuum |
|
||||
| `db.incrementalVacuumPages` | `1024` | Pages returned to OS per reaper cycle |
|
||||
|
||||
## Manual VACUUM
|
||||
|
||||
You can also run a manual vacuum from the SQLite CLI:
|
||||
|
||||
```bash
|
||||
sqlite3 data/meshcore.db "PRAGMA auto_vacuum = INCREMENTAL; VACUUM;"
|
||||
```
|
||||
|
||||
This is equivalent to `vacuumOnStartup: true` but can be done offline.
|
||||
|
||||
> ⚠️ Full VACUUM requires **2× the database file size** in free disk space (it
|
||||
> creates a temporary copy). Check with `ls -lh data/meshcore.db` before running.
|
||||
|
||||
## Checking current mode
|
||||
|
||||
```bash
|
||||
sqlite3 data/meshcore.db "PRAGMA auto_vacuum;"
|
||||
```
|
||||
|
||||
- `0` = NONE (default for old databases)
|
||||
- `1` = FULL (automatic, but slower writes)
|
||||
- `2` = INCREMENTAL (recommended — CoreScope triggers vacuum after deletes)
|
||||
|
||||
See [#919](https://github.com/Kpa-clawbot/CoreScope/issues/919) for background on this feature.
|
||||
@@ -52,14 +52,3 @@ CoreScope uses URL hashes for deep linking. Copy the URL from your browser — i
|
||||
- `#/packets/abc123` — a specific packet
|
||||
- `#/analytics?tab=collisions` — the hash issues tab
|
||||
- `#/nodes/pubkey123` — a specific node's detail page
|
||||
|
||||
### Where is the API documentation?
|
||||
|
||||
CoreScope auto-generates an OpenAPI 3.0 specification from its route definitions:
|
||||
|
||||
- **Interactive docs (Swagger UI):** `/api/docs` — browse and test all 40+ endpoints from your browser
|
||||
- **Machine-readable spec:** `/api/spec` — import into Postman, Insomnia, or any OpenAPI tool
|
||||
|
||||
The spec is always in sync with the running server. No manual maintenance needed.
|
||||
|
||||
On the public instance: [analyzer.00id.net/api/docs](https://analyzer.00id.net/api/docs)
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
# Geographic Filtering
|
||||
|
||||
CoreScope supports geographic filtering to restrict which nodes are ingested and returned in API responses. This is useful for public-facing deployments that should only show activity in a specific region.
|
||||
|
||||
## How it works
|
||||
|
||||
Geographic filtering operates at two levels:
|
||||
|
||||
- **Ingest time** — ADVERT packets carrying GPS coordinates are rejected by the ingestor if the node falls outside the configured area. The node never reaches the database.
|
||||
- **API responses** — Nodes already in the database are filtered from the `/api/nodes` response if they fall outside the area. This covers nodes ingested before the filter was configured.
|
||||
|
||||
Nodes with no GPS fix (`lat=0, lon=0` or missing coordinates) always pass the filter regardless of configuration.
|
||||
|
||||
## Configuration
|
||||
|
||||
Add a `geo_filter` block to `config.json`:
|
||||
|
||||
```json
|
||||
"geo_filter": {
|
||||
"polygon": [
|
||||
[51.55, 3.80],
|
||||
[51.55, 5.90],
|
||||
[50.65, 5.90],
|
||||
[50.65, 3.80]
|
||||
],
|
||||
"bufferKm": 20
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `polygon` | `[[lat, lon], ...]` | Array of at least 3 coordinate pairs defining the boundary |
|
||||
| `bufferKm` | number | Extra distance (km) around the polygon edge that is also accepted. `0` = exact boundary |
|
||||
|
||||
Both the server and the ingestor read `geo_filter` from `config.json`. Restart both after changing this section.
|
||||
|
||||
To disable filtering entirely, remove the `geo_filter` block.
|
||||
|
||||
### Legacy bounding box
|
||||
|
||||
An older bounding box format is also supported as a fallback when no `polygon` is present:
|
||||
|
||||
```json
|
||||
"geo_filter": {
|
||||
"latMin": 50.65,
|
||||
"latMax": 51.55,
|
||||
"lonMin": 3.80,
|
||||
"lonMax": 5.90
|
||||
}
|
||||
```
|
||||
|
||||
Prefer the polygon format — it supports irregular shapes and the `bufferKm` margin.
|
||||
|
||||
## API endpoint
|
||||
|
||||
The current geo filter configuration is exposed at:
|
||||
|
||||
```
|
||||
GET /api/config/geo-filter
|
||||
```
|
||||
|
||||
The frontend reads this endpoint to display the active filter. No authentication is required (the endpoint returns config, not private data).
|
||||
|
||||
## GeoFilter Builder
|
||||
|
||||
The simplest way to create a polygon is the included visual builder:
|
||||
|
||||
**File:** `tools/geofilter-builder.html`
|
||||
|
||||
Open it directly in a browser — it runs entirely client-side, no server required:
|
||||
|
||||
```bash
|
||||
# From the project root
|
||||
open tools/geofilter-builder.html # macOS
|
||||
xdg-open tools/geofilter-builder.html # Linux
|
||||
start tools/geofilter-builder.html # Windows
|
||||
```
|
||||
|
||||
**Workflow:**
|
||||
|
||||
1. The map opens centered on Belgium by default. Navigate to your region.
|
||||
2. Click on the map to add polygon vertices. Each click adds a numbered point.
|
||||
3. Add at least 3 points to form a closed polygon.
|
||||
4. Adjust **Buffer km** (default 20) to add a margin around the polygon edge.
|
||||
5. The generated JSON block appears at the bottom of the page — copy it directly into `config.json`.
|
||||
6. Use **↩ Undo** to remove the last point, **✕ Clear** to start over.
|
||||
|
||||
The output is a complete `{ "geo_filter": { ... } }` block ready to paste into `config.json`.
|
||||
|
||||
## Cleaning up historical nodes
|
||||
|
||||
The ingestor prevents new out-of-bounds nodes from being ingested, but it does not retroactively remove nodes that were stored before the filter was configured. For that, use the prune script.
|
||||
|
||||
**File:** `scripts/prune-nodes-outside-geo-filter.py`
|
||||
|
||||
```bash
|
||||
# Dry run — shows what would be deleted without making any changes
|
||||
python3 scripts/prune-nodes-outside-geo-filter.py --dry-run
|
||||
|
||||
# Default paths: /app/data/meshcore.db and /app/config.json
|
||||
python3 scripts/prune-nodes-outside-geo-filter.py
|
||||
|
||||
# Custom paths
|
||||
python3 scripts/prune-nodes-outside-geo-filter.py /path/to/meshcore.db \
|
||||
--config /path/to/config.json
|
||||
|
||||
# In Docker — run inside the container
|
||||
docker exec -it meshcore-analyzer \
|
||||
python3 /app/scripts/prune-nodes-outside-geo-filter.py --dry-run
|
||||
```
|
||||
|
||||
The script reads `geo_filter.polygon` and `geo_filter.bufferKm` from config, lists the nodes that fall outside, then asks for `yes` confirmation before deleting. Nodes without coordinates are always kept.
|
||||
|
||||
This is a **one-time migration tool** — run it once after first configuring `geo_filter` to clean up pre-filter data. The ingestor handles all subsequent filtering automatically at ingest time.
|
||||
@@ -1,98 +0,0 @@
|
||||
// Package channel provides MeshCore hashtag channel key derivation,
|
||||
// decryption (HMAC-SHA256 MAC + AES-128-ECB), and plaintext parsing.
|
||||
package channel
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// DeriveKey derives an AES-128 key from a channel name (e.g. "#wardriving").
|
||||
// Returns 16 bytes: SHA-256(channelName)[:16].
|
||||
func DeriveKey(channelName string) []byte {
|
||||
h := sha256.Sum256([]byte(channelName))
|
||||
return h[:16]
|
||||
}
|
||||
|
||||
// ChannelHash returns the 1-byte channel hash used as the first byte of GRP_TXT payloads.
|
||||
// It is the first byte of SHA-256 of the 16-byte key.
|
||||
func ChannelHash(key []byte) byte {
|
||||
h := sha256.Sum256(key)
|
||||
return h[0]
|
||||
}
|
||||
|
||||
// Decrypt verifies the 2-byte HMAC-SHA256 MAC and performs AES-128-ECB decryption.
|
||||
// mac must be exactly 2 bytes. ciphertext must be a multiple of 16 bytes.
|
||||
// Returns the plaintext and true if MAC verification succeeded, or nil and false otherwise.
|
||||
func Decrypt(key []byte, mac []byte, ciphertext []byte) ([]byte, bool) {
|
||||
if len(key) != 16 || len(mac) != 2 || len(ciphertext) == 0 || len(ciphertext)%aes.BlockSize != 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// 32-byte channel secret: 16-byte key + 16 zero bytes
|
||||
channelSecret := make([]byte, 32)
|
||||
copy(channelSecret, key)
|
||||
|
||||
// Verify HMAC-SHA256 (first 2 bytes must match)
|
||||
h := hmac.New(sha256.New, channelSecret)
|
||||
h.Write(ciphertext)
|
||||
calculatedMac := h.Sum(nil)
|
||||
if calculatedMac[0] != mac[0] || calculatedMac[1] != mac[1] {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// AES-128-ECB decrypt
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
plaintext := make([]byte, len(ciphertext))
|
||||
for i := 0; i < len(ciphertext); i += aes.BlockSize {
|
||||
block.Decrypt(plaintext[i:i+aes.BlockSize], ciphertext[i:i+aes.BlockSize])
|
||||
}
|
||||
|
||||
return plaintext, true
|
||||
}
|
||||
|
||||
// ParsePlaintext parses decrypted plaintext into timestamp, sender, and message.
|
||||
// Format: timestamp(4 LE) + flags(1) + "sender: message\0..."
|
||||
func ParsePlaintext(plaintext []byte) (timestamp uint32, sender string, message string, err error) {
|
||||
if len(plaintext) < 5 {
|
||||
return 0, "", "", fmt.Errorf("plaintext too short (%d bytes)", len(plaintext))
|
||||
}
|
||||
|
||||
timestamp = binary.LittleEndian.Uint32(plaintext[0:4])
|
||||
text := string(plaintext[5:])
|
||||
if idx := strings.IndexByte(text, 0); idx >= 0 {
|
||||
text = text[:idx]
|
||||
}
|
||||
|
||||
if !utf8.ValidString(text) || countNonPrintable(text) > 2 {
|
||||
return 0, "", "", fmt.Errorf("decrypted text contains non-printable characters")
|
||||
}
|
||||
|
||||
// Parse "sender: message" format
|
||||
if colonIdx := strings.Index(text, ": "); colonIdx > 0 && colonIdx < 50 {
|
||||
potentialSender := text[:colonIdx]
|
||||
if !strings.ContainsAny(potentialSender, ":[]") {
|
||||
return timestamp, potentialSender, text[colonIdx+2:], nil
|
||||
}
|
||||
}
|
||||
|
||||
return timestamp, "", text, nil
|
||||
}
|
||||
|
||||
// countNonPrintable returns the number of control runes in s, not counting
// the common whitespace controls newline, carriage return, and tab.
func countNonPrintable(s string) int {
	total := 0
	for _, r := range s {
		switch r {
		case '\n', '\r', '\t':
			// Allowed whitespace controls — never counted.
		default:
			if r < 32 {
				total++
			}
		}
	}
	return total
}
|
||||
@@ -1,161 +0,0 @@
|
||||
package channel
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDeriveKey(t *testing.T) {
|
||||
key := DeriveKey("#wardriving")
|
||||
h := sha256.Sum256([]byte("#wardriving"))
|
||||
expected := h[:16]
|
||||
if len(key) != 16 {
|
||||
t.Fatalf("key length %d, want 16", len(key))
|
||||
}
|
||||
for i := range key {
|
||||
if key[i] != expected[i] {
|
||||
t.Fatalf("DeriveKey mismatch at byte %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelHash(t *testing.T) {
|
||||
key := DeriveKey("#wardriving")
|
||||
ch := ChannelHash(key)
|
||||
h := sha256.Sum256(key)
|
||||
if ch != h[0] {
|
||||
t.Fatalf("ChannelHash %02x, want %02x", ch, h[0])
|
||||
}
|
||||
}
|
||||
|
||||
// testECBEncrypt is a test helper that AES-ECB encrypts plaintext block by block.
func testECBEncrypt(t *testing.T, key, plaintext []byte) []byte {
	t.Helper()
	cipher, err := aes.NewCipher(key)
	if err != nil {
		t.Fatal(err)
	}
	out := make([]byte, len(plaintext))
	for off := 0; off < len(plaintext); off += aes.BlockSize {
		cipher.Encrypt(out[off:off+aes.BlockSize], plaintext[off:off+aes.BlockSize])
	}
	return out
}
|
||||
|
||||
// testComputeMAC returns the 2-byte truncated HMAC-SHA256 over ciphertext,
// keyed with the 16-byte channel key zero-padded to 32 bytes.
func testComputeMAC(key, ciphertext []byte) []byte {
	padded := make([]byte, 32)
	copy(padded, key)
	mac := hmac.New(sha256.New, padded)
	mac.Write(ciphertext)
	return mac.Sum(nil)[:2]
}
|
||||
|
||||
func TestDecryptValidMAC(t *testing.T) {
|
||||
key := DeriveKey("#test")
|
||||
padded := make([]byte, 16)
|
||||
copy(padded, []byte{0x01, 0x00, 0x00, 0x00, 0x00})
|
||||
ciphertext := testECBEncrypt(t, key, padded)
|
||||
mac := testComputeMAC(key, ciphertext)
|
||||
|
||||
result, ok := Decrypt(key, mac, ciphertext)
|
||||
if !ok {
|
||||
t.Fatal("Decrypt returned false for valid MAC")
|
||||
}
|
||||
if len(result) != 16 {
|
||||
t.Fatalf("result length %d, want 16", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptInvalidMAC(t *testing.T) {
|
||||
key := DeriveKey("#test")
|
||||
ciphertext := make([]byte, 16)
|
||||
mac := []byte{0xFF, 0xFF}
|
||||
_, ok := Decrypt(key, mac, ciphertext)
|
||||
if ok {
|
||||
t.Fatal("Decrypt should reject wrong MAC")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptWrongChannel(t *testing.T) {
|
||||
key1 := DeriveKey("#channel1")
|
||||
key2 := DeriveKey("#channel2")
|
||||
padded := make([]byte, 16)
|
||||
copy(padded, []byte{0x01, 0x00, 0x00, 0x00, 0x00, 'h', 'i'})
|
||||
ciphertext := testECBEncrypt(t, key1, padded)
|
||||
mac := testComputeMAC(key1, ciphertext)
|
||||
|
||||
_, ok := Decrypt(key2, mac, ciphertext)
|
||||
if ok {
|
||||
t.Fatal("Decrypt should reject wrong channel key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePlaintext(t *testing.T) {
|
||||
plain := []byte{100, 0, 0, 0, 0}
|
||||
plain = append(plain, []byte("Alice: Hello\x00")...)
|
||||
ts, sender, msg, err := ParsePlaintext(plain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ts != 100 {
|
||||
t.Fatalf("timestamp %d, want 100", ts)
|
||||
}
|
||||
if sender != "Alice" {
|
||||
t.Fatalf("sender %q, want Alice", sender)
|
||||
}
|
||||
if msg != "Hello" {
|
||||
t.Fatalf("message %q, want Hello", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePlaintextNoSender(t *testing.T) {
|
||||
plain := []byte{1, 0, 0, 0, 0}
|
||||
plain = append(plain, []byte("just a message\x00")...)
|
||||
_, sender, msg, err := ParsePlaintext(plain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if sender != "" {
|
||||
t.Fatalf("sender %q, want empty", sender)
|
||||
}
|
||||
if msg != "just a message" {
|
||||
t.Fatalf("message %q", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeriveKeyMatchesIngestor(t *testing.T) {
|
||||
channelName := "#MeshCore"
|
||||
key := DeriveKey(channelName)
|
||||
hexKey := hex.EncodeToString(key)
|
||||
h := sha256.Sum256([]byte(channelName))
|
||||
expected := hex.EncodeToString(h[:16])
|
||||
if hexKey != expected {
|
||||
t.Fatalf("key hex %s != expected %s", hexKey, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
key := DeriveKey("#test")
|
||||
original := make([]byte, 32)
|
||||
copy(original, []byte{0x64, 0x00, 0x00, 0x00, 0x00})
|
||||
copy(original[5:], []byte("Bob: world\x00"))
|
||||
|
||||
ciphertext := testECBEncrypt(t, key, original)
|
||||
mac := testComputeMAC(key, ciphertext)
|
||||
|
||||
plaintext, ok := Decrypt(key, mac, ciphertext)
|
||||
if !ok {
|
||||
t.Fatal("round-trip MAC failed")
|
||||
}
|
||||
|
||||
ts, sender, msg, err := ParsePlaintext(plaintext)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ts != 100 || sender != "Bob" || msg != "world" {
|
||||
t.Fatalf("got ts=%d sender=%q msg=%q", ts, sender, msg)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
module github.com/meshcore-analyzer/channel
|
||||
|
||||
go 1.22
|
||||
@@ -1,3 +0,0 @@
|
||||
module github.com/meshcore-analyzer/packetpath
|
||||
|
||||
go 1.22
|
||||
@@ -1,76 +0,0 @@
|
||||
// Package packetpath provides shared helpers for extracting path hops from
|
||||
// raw MeshCore packet hex bytes.
|
||||
package packetpath
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DecodePathFromRawHex extracts the header path hops directly from raw hex bytes.
|
||||
// This is the authoritative path that matches what's in raw_hex, as opposed to
|
||||
// decoded.Path.Hops which may be overwritten for TRACE packets (issue #886).
|
||||
//
|
||||
// WARNING: This function returns the literal header path bytes regardless of
|
||||
// payload type. For TRACE packets these bytes are SNR values, NOT hop hashes.
|
||||
// Callers that may receive TRACE packets MUST check PathBytesAreHops(payloadType)
|
||||
// first, or use the safer DecodeHopsForPayload wrapper.
|
||||
func DecodePathFromRawHex(rawHex string) ([]string, error) {
|
||||
buf, err := hex.DecodeString(rawHex)
|
||||
if err != nil || len(buf) < 2 {
|
||||
return nil, fmt.Errorf("invalid or too-short hex")
|
||||
}
|
||||
|
||||
headerByte := buf[0]
|
||||
offset := 1
|
||||
if IsTransportRoute(int(headerByte & 0x03)) {
|
||||
if len(buf) < offset+4 {
|
||||
return nil, fmt.Errorf("too short for transport codes")
|
||||
}
|
||||
offset += 4
|
||||
}
|
||||
if offset >= len(buf) {
|
||||
return nil, fmt.Errorf("too short for path byte")
|
||||
}
|
||||
|
||||
pathByte := buf[offset]
|
||||
offset++
|
||||
|
||||
hashSize := int(pathByte>>6) + 1
|
||||
hashCount := int(pathByte & 0x3F)
|
||||
|
||||
hops := make([]string, 0, hashCount)
|
||||
for i := 0; i < hashCount; i++ {
|
||||
start := offset + i*hashSize
|
||||
end := start + hashSize
|
||||
if end > len(buf) {
|
||||
break
|
||||
}
|
||||
hops = append(hops, strings.ToUpper(hex.EncodeToString(buf[start:end])))
|
||||
}
|
||||
return hops, nil
|
||||
}
|
||||
|
||||
// DecodeHopsForPayload returns the header path hops only when the payload type's
|
||||
// header bytes are actually route hops (i.e. PathBytesAreHops(payloadType) is true).
|
||||
// For TRACE packets it returns (nil, ErrPayloadHasNoHeaderHops) so the caller is
|
||||
// forced to source hops from the decoded payload instead.
|
||||
//
|
||||
// Prefer this over DecodePathFromRawHex when the payload type is known.
|
||||
func DecodeHopsForPayload(rawHex string, payloadType byte) ([]string, error) {
|
||||
if !PathBytesAreHops(payloadType) {
|
||||
return nil, ErrPayloadHasNoHeaderHops
|
||||
}
|
||||
return DecodePathFromRawHex(rawHex)
|
||||
}
|
||||
|
||||
// ErrPayloadHasNoHeaderHops is returned by DecodeHopsForPayload when the
// payload type repurposes the raw_hex header path bytes (e.g. TRACE → SNR values).
// It is a zero-size comparable sentinel, so callers may test it with == or
// errors.Is.
var ErrPayloadHasNoHeaderHops = errPayloadHasNoHeaderHops{}

// errPayloadHasNoHeaderHops is the concrete sentinel type backing
// ErrPayloadHasNoHeaderHops.
type errPayloadHasNoHeaderHops struct{}

// Error implements the error interface.
func (errPayloadHasNoHeaderHops) Error() string {
	return "payload type repurposes header path bytes; source hops from decoded payload"
}
|
||||
@@ -1,150 +0,0 @@
|
||||
package packetpath
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDecodePathFromRawHex_Basic(t *testing.T) {
|
||||
// Build a simple FLOOD packet (route_type=1) with 2 hops of hashSize=1
|
||||
// header: route_type=1, payload_type=2 (TXT_MSG), version=0 → 0b00_0010_01 = 0x09
|
||||
// path byte: hashSize=1 (bits 7-6 = 0), hashCount=2 (bits 5-0 = 2) → 0x02
|
||||
// hops: AB, CD
|
||||
// payload: some bytes
|
||||
raw := "0902ABCD" + "DEADBEEF"
|
||||
hops, err := DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(hops) != 2 || hops[0] != "AB" || hops[1] != "CD" {
|
||||
t.Fatalf("expected [AB, CD], got %v", hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_ZeroHops(t *testing.T) {
|
||||
// DIRECT route (type=2), no hops → 0b00_0010_10 = 0x0A
|
||||
// path byte: 0x00 (0 hops)
|
||||
raw := "0A00" + "DEADBEEF"
|
||||
hops, err := DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(hops) != 0 {
|
||||
t.Fatalf("expected 0 hops, got %v", hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePathFromRawHex_TransportRoute(t *testing.T) {
|
||||
// TRANSPORT_FLOOD (route_type=0), payload_type=5 (GRP_TXT), version=0
|
||||
// header: 0b00_0101_00 = 0x14
|
||||
// transport codes: 4 bytes
|
||||
// path byte: hashSize=1, hashCount=1 → 0x01
|
||||
// hop: FF
|
||||
raw := "14" + "00112233" + "01" + "FF" + "DEAD"
|
||||
hops, err := DecodePathFromRawHex(raw)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(hops) != 1 || hops[0] != "FF" {
|
||||
t.Fatalf("expected [FF], got %v", hops)
|
||||
}
|
||||
}
|
||||
|
||||
// buildTracePacket creates a TRACE packet hex string where header path bytes are
// SNR values, and payload contains the actual route hops.
func buildTracePacket() (rawHex string, headerPathHops []string, payloadHops []string) {
	// DIRECT route (type=2), TRACE payload (type=9), version=0 → header 0x26.
	// Header path byte 0x02: hashSize=1, hashCount=2; the two entries are
	// SNR values 0x1A (26 dB) and 0x0F (15 dB) — NOT hop hashes.
	packet := []byte{0x26, 0x02, 0x1A, 0x0F}

	// TRACE payload layout: tag(4 LE) + authCode(4 LE) + flags(1) + hop bytes.
	packet = append(packet, 0x01, 0x00, 0x00, 0x00) // tag
	packet = append(packet, 0x02, 0x00, 0x00, 0x00) // auth code
	packet = append(packet, 0x00)                   // flags: path_sz=0 → 1-byte hops
	packet = append(packet, 0xAA, 0xBB, 0xCC)       // the actual route hops

	rawHex = strings.ToUpper(hex.EncodeToString(packet))
	headerPathHops = []string{"1A", "0F"}      // SNR values — NOT route hops
	payloadHops = []string{"AA", "BB", "CC"}   // actual route hops from payload
	return rawHex, headerPathHops, payloadHops
}
|
||||
|
||||
func TestDecodePathFromRawHex_TraceReturnsSNR(t *testing.T) {
|
||||
rawHex, expectedSNR, _ := buildTracePacket()
|
||||
hops, err := DecodePathFromRawHex(rawHex)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// DecodePathFromRawHex always returns header path bytes — for TRACE these are SNR values
|
||||
if len(hops) != len(expectedSNR) {
|
||||
t.Fatalf("expected %d hops (SNR), got %d: %v", len(expectedSNR), len(hops), hops)
|
||||
}
|
||||
for i, h := range hops {
|
||||
if h != expectedSNR[i] {
|
||||
t.Errorf("hop[%d]: expected %s, got %s", i, expectedSNR[i], h)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTracePathJSON_UsesPayloadHops(t *testing.T) {
|
||||
// This test validates the TRACE vs non-TRACE logic that callers should implement:
|
||||
// For TRACE: path_json = decoded.Path.Hops (payload-decoded route hops)
|
||||
// For non-TRACE: path_json = DecodePathFromRawHex(raw_hex)
|
||||
rawHex, snrHops, payloadHops := buildTracePacket()
|
||||
|
||||
// DecodePathFromRawHex returns SNR bytes for TRACE
|
||||
headerHops, _ := DecodePathFromRawHex(rawHex)
|
||||
headerJSON, _ := json.Marshal(headerHops)
|
||||
|
||||
// payload hops (what decoded.Path.Hops would return after TRACE decoding)
|
||||
payloadJSON, _ := json.Marshal(payloadHops)
|
||||
|
||||
// They must differ — SNR != route hops
|
||||
if string(headerJSON) == string(payloadJSON) {
|
||||
t.Fatalf("SNR hops and payload hops should differ for TRACE; both are %s", headerJSON)
|
||||
}
|
||||
|
||||
// For TRACE, path_json should be payloadHops, not headerHops
|
||||
_ = snrHops // snrHops == headerHops — used for documentation
|
||||
t.Logf("TRACE: header path (SNR) = %s, payload path (route) = %s", headerJSON, payloadJSON)
|
||||
}
|
||||
|
||||
func TestDecodeHopsForPayload_NonTrace(t *testing.T) {
|
||||
// header 0x01, path_len 0x02, hops 0xAA 0xBB, then payload bytes
|
||||
raw := "0102AABB00"
|
||||
hops, err := DecodeHopsForPayload(raw, 0x05) // GRP_TXT — header path bytes ARE hops
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(hops) != 2 || hops[0] != "AA" || hops[1] != "BB" {
|
||||
t.Errorf("expected [AA BB], got %v", hops)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeHopsForPayload_TraceReturnsError(t *testing.T) {
|
||||
raw := "010205F00100"
|
||||
hops, err := DecodeHopsForPayload(raw, PayloadTRACE)
|
||||
if err != ErrPayloadHasNoHeaderHops {
|
||||
t.Errorf("expected ErrPayloadHasNoHeaderHops, got %v", err)
|
||||
}
|
||||
if hops != nil {
|
||||
t.Errorf("expected nil hops for TRACE, got %v", hops)
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package packetpath
|
||||
|
||||
// Route type constants (header bits 1-0).
const (
	RouteTransportFlood  = 0
	RouteFlood           = 1
	RouteDirect          = 2
	RouteTransportDirect = 3
)

// PayloadTRACE is the payload type constant for TRACE packets.
const PayloadTRACE = 0x09

// IsTransportRoute reports whether routeType is a transport route:
// TRANSPORT_FLOOD (0) or TRANSPORT_DIRECT (3).
func IsTransportRoute(routeType int) bool {
	switch routeType {
	case RouteTransportFlood, RouteTransportDirect:
		return true
	default:
		return false
	}
}

// PathBytesAreHops reports whether the raw_hex header path bytes represent
// route hop hashes (the normal case). It returns false for packet types where
// the header path bytes are repurposed (e.g. TRACE uses them for SNR values).
func PathBytesAreHops(payloadType byte) bool {
	return payloadType != PayloadTRACE
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package packetpath
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestIsTransportRoute(t *testing.T) {
|
||||
if !IsTransportRoute(RouteTransportFlood) {
|
||||
t.Error("RouteTransportFlood should be transport")
|
||||
}
|
||||
if !IsTransportRoute(RouteTransportDirect) {
|
||||
t.Error("RouteTransportDirect should be transport")
|
||||
}
|
||||
if IsTransportRoute(RouteFlood) {
|
||||
t.Error("RouteFlood should not be transport")
|
||||
}
|
||||
if IsTransportRoute(RouteDirect) {
|
||||
t.Error("RouteDirect should not be transport")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPathBytesAreHops(t *testing.T) {
|
||||
if PathBytesAreHops(PayloadTRACE) {
|
||||
t.Error("PathBytesAreHops(PayloadTRACE) should be false")
|
||||
}
|
||||
// All other known payload types should return true.
|
||||
otherTypes := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}
|
||||
for _, pt := range otherTypes {
|
||||
if !PathBytesAreHops(pt) {
|
||||
t.Errorf("PathBytesAreHops(0x%02X) should be true", pt)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
module github.com/meshcore-analyzer/sigvalidate
|
||||
|
||||
go 1.22
|
||||
@@ -1,27 +0,0 @@
|
||||
// Package sigvalidate provides ed25519 signature validation for MeshCore advert packets.
|
||||
package sigvalidate
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ValidateAdvert verifies the ed25519 signature on a MeshCore advert.
// pubKey must be 32 bytes, signature must be 64 bytes.
// The signed message is: pubKey (32) + timestamp (4 LE) + appdata.
func ValidateAdvert(pubKey, signature []byte, timestamp uint32, appdata []byte) (bool, error) {
	if len(pubKey) != 32 {
		return false, fmt.Errorf("invalid pubkey length: %d", len(pubKey))
	}
	if len(signature) != 64 {
		return false, fmt.Errorf("invalid signature length: %d", len(signature))
	}

	// Reconstruct the signed message: pubKey ‖ timestamp (little-endian) ‖ appdata.
	message := make([]byte, 0, 32+4+len(appdata))
	message = append(message, pubKey...)
	message = binary.LittleEndian.AppendUint32(message, timestamp)
	message = append(message, appdata...)

	return ed25519.Verify(ed25519.PublicKey(pubKey), message, signature), nil
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user