Mirror of https://github.com/Kpa-clawbot/meshcore-analyzer.git (synced 2026-05-13 16:53:09 +00:00)
Compare commits
411 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 03b5d3fe28 | |||
| b4f186af19 | |||
| 9b9848611b | |||
| 7c60d9db4b | |||
| 8bb994750e | |||
| a069586f43 | |||
| 433ba0d30b | |||
| d7b343ccce | |||
| 9d1f5d2395 | |||
| b95684e8ca | |||
| 10546f1870 | |||
| b0da831d4e | |||
| 50f2237cf7 | |||
| 09200c8dfe | |||
| 05876b3a59 | |||
| f58214a6cc | |||
| 99677f71b6 | |||
| 56167b4d28 | |||
| cf136aa367 | |||
| 0063c7c24a | |||
| 16c48e73b3 | |||
| de2595a147 | |||
| 53762d341b | |||
| ab3cbada13 | |||
| 99cd0a8947 | |||
| a104cb963b | |||
| 9774403fa4 | |||
| bc644a368e | |||
| b8f4a91381 | |||
| 9c97c382ee | |||
| 096c1ee489 | |||
| c2602e0fdd | |||
| f4cf2acbc0 | |||
| b4f31c95eb | |||
| 254f886ed9 | |||
| 86eae59f46 | |||
| 5de3ba907c | |||
| 8a0aa21348 | |||
| 89d644dd72 | |||
| 79bf673fa3 | |||
| 6df72a4512 | |||
| c5a7addbc8 | |||
| f9cef6c9fa | |||
| f51329759d | |||
| cf604ca788 | |||
| 0eee922d5a | |||
| a0651c4740 | |||
| 7fe2ef3ea4 | |||
| 13b370282a | |||
| c0f698804c | |||
| d6256c4f94 | |||
| cfd1903c6b | |||
| 63c7ee2fa6 | |||
| 1b00ed2eb1 | |||
| b157ec64a0 | |||
| a86deaa520 | |||
| a55398ed2d | |||
| 81f95aaabe | |||
| f27132e44e | |||
| a09a1cb7e4 | |||
| cfeab24af0 | |||
| 16811ade3a | |||
| 218671733d | |||
| 66b432d842 | |||
| 051c251e7f | |||
| 844536a4cc | |||
| a8852275cd | |||
| 3fd5d0e7f5 | |||
| 62295a036b | |||
| 1349f21bd9 | |||
| 4fe835b6c3 | |||
| fb744d895f | |||
| dfacfd0f6e | |||
| 652d4939ea | |||
| 1578cf8027 | |||
| b44d9de751 | |||
| bae162d111 | |||
| 84a9d4105e | |||
| 0d8bc7536c | |||
| d2e248dd61 | |||
| ecf6f3cd44 | |||
| 292496b5e4 | |||
| b88e292714 | |||
| 88d71ba8a1 | |||
| eae1b915ca | |||
| eddca7acde | |||
| cd238d366f | |||
| 16f3bc9d26 | |||
| 50ee7fb7b9 | |||
| e5aa490686 | |||
| 87a7f53ef4 | |||
| 494d3022f9 | |||
| 86880c241b | |||
| b15fb40193 | |||
| 9451217085 | |||
| b34febe3ed | |||
| 9169ff384c | |||
| 364c5766fc | |||
| 0568c35b8d | |||
| e1b9bf05f8 | |||
| 0b7d622337 | |||
| 493b7c5f17 | |||
| 351c8fa1c0 | |||
| f4abe6f451 | |||
| b74bbf803c | |||
| 268f9ae5a4 | |||
| 93f7a11906 | |||
| 13e2d349c2 | |||
| 072a8f2ff9 | |||
| b03ef4abd3 | |||
| 6b9154df3a | |||
| 19e58c97b6 | |||
| e281871971 | |||
| 642aac472f | |||
| 5e6dfaa496 | |||
| 5a5df5d92b | |||
| ee7ebd988b | |||
| 1559dcae2b | |||
| 0d47a2e295 | |||
| 96cf279fdc | |||
| fdce338b92 | |||
| f86a44d6b5 | |||
| 0a3590358e | |||
| f8fdfe38f3 | |||
| 0ee26b4fcf | |||
| 9827e751aa | |||
| 33e8263b9e | |||
| 74dffa2fb7 | |||
| 4b3b01b50a | |||
| 6eea05960d | |||
| 330942f661 | |||
| acb7e15bec | |||
| 91522ef738 | |||
| 76d89e6578 | |||
| 90c3d46e8f | |||
| 0497d98bc9 | |||
| 4973b18e9d | |||
| d1f72c7c5a | |||
| 5a901e4ccf | |||
| 45f2607f75 | |||
| 433f1b544e | |||
| d78ccb4bd4 | |||
| 33ee1659de | |||
| 51ce1b1085 | |||
| 25b3e4ec2b | |||
| 50676d5e65 | |||
| 2634e4b4b7 | |||
| 33fab978f4 | |||
| f8e9964915 | |||
| 7e6fb013a3 | |||
| 4b91035f4d | |||
| 12d96a9d15 | |||
| 10caa71ccf | |||
| 43321e8eae | |||
| b62d00f4fd | |||
| c839fa7579 | |||
| 35137819d0 | |||
| bf68a99acf | |||
| ef1cabe077 | |||
| 214b16b5e6 | |||
| 1245bcfd1d | |||
| b084dc12f2 | |||
| aadedd5e3a | |||
| 4cd51a41e7 | |||
| c84ec409c7 | |||
| eaeb65b426 | |||
| ac53b1bf06 | |||
| a9fbabbd99 | |||
| 76e0bd3144 | |||
| 7c00008fd1 | |||
| 52bb07d6c1 | |||
| ebf9c5550d | |||
| 0fc435c415 | |||
| b3da743863 | |||
| 22c6ed7608 | |||
| dec0259645 | |||
| 85b8c8115a | |||
| d1e6c733dc | |||
| b52a938b27 | |||
| 6d17cac40e | |||
| ade7513693 | |||
| eda0d590b2 | |||
| 0da119f843 | |||
| 6a8b1363bd | |||
| e4bb175ac3 | |||
| 88dca33355 | |||
| c1763379b8 | |||
| 9d4a3a436e | |||
| 968f62185d | |||
| 2ee09142d1 | |||
| 043b6dfd58 | |||
| ac0cf5ac7d | |||
| 3935b0028f | |||
| aad00ff70b | |||
| fc19fcd0d8 | |||
| 4288d48d28 | |||
| fc558bfbfa | |||
| 36ee71d17e | |||
| 5fa3b56ccb | |||
| 55302a362e | |||
| f5c9680e78 | |||
| b4080c25d3 | |||
| 73c1253cea | |||
| 39b545f6b7 | |||
| 282074b19d | |||
| 136e1d23c8 | |||
| f7d8a7cb8f | |||
| e9c801b41a | |||
| 3ab404b545 | |||
| aa3d26f314 | |||
| 5f6c5af0cf | |||
| f33801ecb4 | |||
| d05e468598 | |||
| d192330bdc | |||
| 2b01ecd051 | |||
| 34b418894a | |||
| 1860cb4c54 | |||
| 6a715e6af7 | |||
| fc16b4e069 | |||
| 45f30fcadc | |||
| 8b924cd217 | |||
| 83881e6b71 | |||
| 417b460fa0 | |||
| 78dabd5bda | |||
| e2050f8ec8 | |||
| cbfd159f8e | |||
| eaf14a61f5 | |||
| b71c290783 | |||
| d7fbd4755e | |||
| 13b6eecc82 | |||
| b18ebe1a26 | |||
| 9aa94166df | |||
| 38703c75e6 | |||
| f9cd43f06f | |||
| 26a914274f | |||
| e4f358f562 | |||
| 5ff9d4f31d | |||
| db75dbee44 | |||
| 16e1ff9e6c | |||
| d144764d38 | |||
| c4fac7fe2e | |||
| 13587584d2 | |||
| 68cd9d77c6 | |||
| f2ee74c8f3 | |||
| f676c146ae | |||
| 227f375b4a | |||
| 2e959145aa | |||
| 72dd377ba1 | |||
| 8a536c5899 | |||
| f3a7d0d435 | |||
| ccc7cf5a77 | |||
| 67da696a42 | |||
| 5829d2328d | |||
| df60f324e9 | |||
| 0aeb33f757 | |||
| e334f8611e | |||
| 32ba77eaf8 | |||
| 724a96f35b | |||
| 849bf1c335 | |||
| a0b791254c | |||
| 62a2a13251 | |||
| c94ba05c01 | |||
| c00b585ee5 | |||
| e2bd9a8fa2 | |||
| 1f3c8130ef | |||
| e5606058c1 | |||
| 47b4021346 | |||
| c93c008867 | |||
| cea2c70d12 | |||
| 71f82d5d25 | |||
| 81430cf4c4 | |||
| 1178bae18f | |||
| 27c8514d70 | |||
| a24ec6e767 | |||
| c1d0daf200 | |||
| d967170dd3 | |||
| 2f0c97604b | |||
| 0b0fda5bb2 | |||
| e966ecc71a | |||
| e7aa8eded8 | |||
| d652b7c39d | |||
| 6a8ed98d8f | |||
| 26daa760cd | |||
| c196030ec0 | |||
| 7b07761fb9 | |||
| e47257222e | |||
| 6f2d70599a | |||
| c120b5eef2 | |||
| 3290ff1ed5 | |||
| 505206feb4 | |||
| 41762a873a | |||
| 7ab05c5a19 | |||
| c3138a96f7 | |||
| 03c895addc | |||
| c9301fee9c | |||
| dd66f678be | |||
| 8ec355c6d6 | |||
| 98e5fe6adf | |||
| b40719a21e | |||
| a695110ea4 | |||
| 3aaa21bbc0 | |||
| 4def3ed7c4 | |||
| cfb4d652a7 | |||
| 9bf4c103d8 | |||
| 49857dd748 | |||
| 8815b194d8 | |||
| 9f55ef802b | |||
| 019ace3645 | |||
| c5139f5de5 | |||
| 0add429d24 | |||
| c8b29d0482 | |||
| 9c5e13d133 | |||
| 1f4969c1a6 | |||
| 38ae1c92de | |||
| ac881e4f4a | |||
| 7e15022d2d | |||
| b3dba21460 | |||
| aabc892272 | |||
| a1f4cb9b5d | |||
| 01a687e912 | |||
| 8652ddc7c0 | |||
| 739bb67fc9 | |||
| 2363a988dc | |||
| b6b25390e8 | |||
| b06adf9f2a | |||
| 51b9fed15e | |||
| cb21305dc4 | |||
| a56ee5c4fe | |||
| df69a17718 | |||
| f229e15869 | |||
| 912cd52a59 | |||
| 51c5842c10 | |||
| b9c967be18 | |||
| a45b921e09 | |||
| 7b11497cd8 | |||
| d3920f66e9 | |||
| 5e01de0d52 | |||
| 4d043579f8 | |||
| b0e4d2fa18 | |||
| c186129d47 | |||
| 43cb0d2ea6 | |||
| f282323cc6 | |||
| aba3e05d1b | |||
| ce2ed99e41 | |||
| 935e40b26c | |||
| 153308134e | |||
| a500d6d506 | |||
| e7c15818c9 | |||
| f3f9ef5353 | |||
| e4422efa5c | |||
| c5460d37dd | |||
| 23d1e8d328 | |||
| 1ca665efde | |||
| e86b5a3a0c | |||
| ed8d7d68bd | |||
| 7960191a62 | |||
| f1b2dfcc56 | |||
| 436c2bb12d | |||
| 62f9962e01 | |||
| 2e3a94b86d | |||
| 81aeadafbf | |||
| 4c0c39823f | |||
| 7d5d130095 | |||
| 50a0eda1aa | |||
| a745847f3b | |||
| 8dfcec2ff3 | |||
| 84ffed96ed | |||
| b21db32d2e | |||
| f34a233ba7 | |||
| 9342ed2799 | |||
| e2d49a62ee | |||
| 564d93d6aa | |||
| 0b7c4c41c6 | |||
| f87654e7d8 | |||
| 0c9b305a99 | |||
| 4aebc4d90b | |||
| 78d96d24db | |||
| 440bda6244 | |||
| aea0a9caee | |||
| 01246f9412 | |||
| 4c309bad80 | |||
| ce769950dd | |||
| 73c04a9ba3 | |||
| e2eaf4c656 | |||
| b7c280c20a | |||
| d43c95a4bb | |||
| bed5e0267f | |||
| 999ecfc84d | |||
| f12428c460 | |||
| 2199d404c9 | |||
| 016a6f2750 | |||
| dd2f044f2b | |||
| 736b09697d | |||
| b3b96b3dda | |||
| 5c9860db46 | |||
| de288e71da | |||
| 3529b1334b | |||
| 7bd1f396df | |||
| 58484ad924 | |||
| 1a2170bf92 | |||
| 8a3c87e5a2 | |||
| 722cf480f8 | |||
| 5cbfb4a8e7 | |||
| b7933553a6 | |||
| fc57433f27 | |||
| 53ab302dd6 | |||
| 5aa8f795cd | |||
| 1e7c187521 | |||
| 4b8d8143f4 | |||
| 3364eed303 | |||
| d65122491e |
@@ -1 +1 @@
-{"schemaVersion":1,"label":"e2e tests","message":"89 passed","color":"brightgreen"}
+{"schemaVersion":1,"label":"e2e tests","message":"1178 passed","color":"brightgreen"}

@@ -1 +1 @@
-{"schemaVersion":1,"label":"frontend coverage","message":"40.21%","color":"red"}
+{"schemaVersion":1,"label":"frontend coverage","message":"39.03%","color":"red"}

@@ -79,6 +79,28 @@ jobs:
          go test ./...
          echo "--- Decrypt CLI tests passed ---"

      - name: Lint CSS variables (issue #1128)
        run: |
          set -e
          node scripts/check-css-vars.js
          node scripts/test-check-css-vars.js

      - name: Run JS unit tests (packet-filter)
        run: |
          set -e
          node test-packet-filter.js
          node test-packet-filter-time.js
          node test-channel-decrypt-insecure-context.js
          node test-live-region-filter.js
          node test-issue-1136-observer-iata-map.js
          node test-channel-qr.js
          node test-channel-qr-wiring.js
          node test-channel-modal-ux.js
          node test-channel-issue-1087.js
          node test-channel-issue-1101.js
          node test-pull-to-reconnect-1091.js
          node test-channel-fluid-layout.js

      - name: Verify proto syntax
        run: |
          set -e

@@ -200,6 +222,38 @@ jobs:
      - name: Run Playwright E2E tests (fail-fast)
        run: |
          BASE_URL=http://localhost:13581 node test-e2e-playwright.js 2>&1 | tee e2e-output.txt
          BASE_URL=http://localhost:13581 node test-filter-ux-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-channel-issue-1087-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-channel-issue-1111-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-map-modal-fluid-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-nav-fluid-1055-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-nav-priority-1102-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-nav-more-floor-1139-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-bottom-nav-1061-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-gestures-1062-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-gestures-1185-scroll-discriminator-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-gesture-hints-1065-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-channel-fluid-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-table-fluid-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-charts-fluid-1058-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-slideover-1056-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-slideover-1168-munger-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-logo-pulse-1173-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1122-packets-filter-ux-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1128-packets-layout-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1128-multi-viewport-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1136-live-region-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1150-404-state-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1146-path-link-contrast-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1147-section-order-e2e.js 2>&1 | tee -a e2e-output.txt
          BASE_URL=http://localhost:13581 node test-issue-1151-orphan-separators-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-logo-rebrand-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-logo-theme-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-logo-default-sage-teal-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-issue-1109-hamburger-dropdown-visible-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-live-layout-1178-1179-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-live-mql-leak-1180-e2e.js 2>&1 | tee -a e2e-output.txt
          CHROMIUM_REQUIRE=1 BASE_URL=http://localhost:13581 node test-nav-drawer-1064-e2e.js 2>&1 | tee -a e2e-output.txt

      - name: Collect frontend coverage (parallel)
        if: success() && github.event_name == 'push'

@@ -1,5 +1,12 @@
# Changelog

## [3.7.2] — 2026-05-06

Hotfix release branched from `v3.7.1`. Cherry-picks PR #1121 only — no other changes.

### 🐛 Bug Fixes
- **Ingestor: backfill infinite loop on `path_json='[]'` rows** (#1119, #1121) — `BackfillPathJSONAsync` re-selected observations whose `path_json` was already `'[]'`, rewrote them to `'[]'`, and looped forever. The migration marker was never recorded and the ingestor sustained 2–3 MB/s WAL writes at idle (~76% CPU in `sqlite.Exec`). Fix: drop `'[]'` from the WHERE clause so the loop terminates after one full pass and the `backfill_path_json_from_raw_hex_v1` marker is written.
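
For reference, the corrected candidate selection looks roughly like this (a simplified sketch of the query in `BackfillPathJSONAsync`, shown in full in the `db.go` diff below; the `ingest` package name is illustrative):

```go
package ingest

// candidateQuery sketches the post-#1121 selection. The backfill writes the
// '[]' sentinel for "attempted, no hops" rows; because the WHERE clause no
// longer matches '[]', every processed row leaves the candidate set, the
// loop drains in one full pass, and the migration marker can be recorded.
const candidateQuery = `
SELECT o.id, o.raw_hex
FROM observations o
JOIN transmissions t ON o.transmission_id = t.id
WHERE o.raw_hex IS NOT NULL AND o.raw_hex != ''
  AND (o.path_json IS NULL OR o.path_json = '') -- '[]' deliberately excluded
  AND t.payload_type != 9 -- TRACE packets are skipped
LIMIT ?`
```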

## [2.5.0] "Digital Rain" — 2026-03-22

### ✨ Matrix Mode — Full Cyberpunk Map Theme

@@ -1,5 +1,8 @@
# Build stage always runs natively on the builder's arch ($BUILDPLATFORM)
# and cross-compiles to $TARGETOS/$TARGETARCH via Go toolchain. No QEMU.
# BUILDPLATFORM is auto-set by buildx; default to linux/amd64 so plain
# `docker build` (without buildx) doesn't fail on an empty platform string.
ARG BUILDPLATFORM=linux/amd64
FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS builder

ARG APP_VERSION=unknown

@@ -15,6 +18,8 @@ COPY cmd/server/go.mod cmd/server/go.sum ./
COPY internal/geofilter/ ../../internal/geofilter/
COPY internal/sigvalidate/ ../../internal/sigvalidate/
COPY internal/packetpath/ ../../internal/packetpath/
COPY internal/dbconfig/ ../../internal/dbconfig/
COPY internal/perfio/ ../../internal/perfio/
RUN go mod download
COPY cmd/server/ ./
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \

@@ -26,6 +31,8 @@ COPY cmd/ingestor/go.mod cmd/ingestor/go.sum ./
COPY internal/geofilter/ ../../internal/geofilter/
COPY internal/sigvalidate/ ../../internal/sigvalidate/
COPY internal/packetpath/ ../../internal/packetpath/
COPY internal/dbconfig/ ../../internal/dbconfig/
COPY internal/perfio/ ../../internal/perfio/
RUN go mod download
COPY cmd/ingestor/ ./
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \

@@ -47,6 +47,24 @@ The config file uses the same format as the Node.js `config.json`. The ingestor
| `DB_PATH` | SQLite database path | `data/meshcore.db` |
| `MQTT_BROKER` | Single MQTT broker URL (overrides config) | — |
| `MQTT_TOPIC` | MQTT topic (used with `MQTT_BROKER`) | `meshcore/#` |
| `CORESCOPE_INGESTOR_STATS` | Path to the per-second stats JSON file consumed by the server's `/api/perf/io` and `/api/perf/write-sources` endpoints (#1120) | `/tmp/corescope-ingestor-stats.json` |

### Stats file (`CORESCOPE_INGESTOR_STATS`)

Every second the ingestor publishes a JSON snapshot of its counters (`tx_inserted`, `obs_inserted`, `walCommits`, `backfillUpdates.*`, etc.) plus a `procIO` block sampled from `/proc/self/io` (read/write/cancelled bytes per second + syscall counts). The server reads this file and surfaces the data on the Perf page so operators can self-diagnose write-volume anomalies.

The writer uses `O_NOFOLLOW | O_CREAT | O_TRUNC` mode `0o600`, so a pre-planted symlink at the path cannot be used to clobber an arbitrary file.
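
For illustration, a minimal sketch of such a symlink-refusing writer (assumes Linux; the `Snapshot` fields here are a small, hypothetical subset of the real counter set):

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"syscall"
)

// Snapshot is an illustrative subset of the ingestor's per-second counters.
type Snapshot struct {
	TxInserted  int64 `json:"tx_inserted"`
	ObsInserted int64 `json:"obs_inserted"`
	WALCommits  int64 `json:"walCommits"`
}

// writeStats overwrites the stats file in place. O_NOFOLLOW makes the open
// fail (ELOOP) if a symlink was pre-planted at path, and mode 0o600 keeps
// the snapshot readable only by the owning user.
func writeStats(path string, s *Snapshot) error {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|syscall.O_NOFOLLOW, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(s)
}

func main() {
	if err := writeStats("/tmp/corescope-ingestor-stats.json", &Snapshot{TxInserted: 42}); err != nil {
		log.Fatal(err)
	}
}
```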

**Security note:** the default lives in `/tmp`, which is world-writable on most hosts (sticky bit only protects deletion, not creation). On shared/multi-tenant hosts, override `CORESCOPE_INGESTOR_STATS` to point at a private directory (e.g. `/var/lib/corescope/ingestor-stats.json`) that only the corescope user can write to.

### Minimal Config

+62 -6

@@ -9,6 +9,7 @@ import (
    "strings"
    "sync"

    "github.com/meshcore-analyzer/dbconfig"
    "github.com/meshcore-analyzer/geofilter"
)

@@ -21,6 +22,17 @@ type MQTTSource struct {
    RejectUnauthorized *bool    `json:"rejectUnauthorized,omitempty"`
    Topics             []string `json:"topics"`
    IATAFilter         []string `json:"iataFilter,omitempty"`
    ConnectTimeoutSec  int      `json:"connectTimeoutSec,omitempty"`
    Region             string   `json:"region,omitempty"`
}

// ConnectTimeoutOrDefault returns the per-source connect timeout in seconds,
// or 30 if not set (matching the WaitTimeout default from #926).
func (s MQTTSource) ConnectTimeoutOrDefault() int {
    if s.ConnectTimeoutSec > 0 {
        return s.ConnectTimeoutSec
    }
    return 30
}

// MQTTLegacy is the old single-broker config format.

@@ -40,10 +52,21 @@ type Config struct {
    HashChannels []string         `json:"hashChannels,omitempty"`
    Retention    *RetentionConfig `json:"retention,omitempty"`
    Metrics      *MetricsConfig   `json:"metrics,omitempty"`
-   GeoFilter    *GeoFilterConfig `json:"geo_filter,omitempty"`
+   GeoFilter          *GeoFilterConfig     `json:"geo_filter,omitempty"`
+   ForeignAdverts     *ForeignAdvertConfig `json:"foreignAdverts,omitempty"`
    ValidateSignatures *bool     `json:"validateSignatures,omitempty"`
    DB                 *DBConfig `json:"db,omitempty"`

    // ObserverIATAWhitelist restricts which observer IATA regions are processed.
    // When non-empty, only observers whose IATA code (from the MQTT topic) matches
    // one of these entries are accepted. Case-insensitive. An empty list means all
    // IATA codes are allowed. This applies globally, unlike the per-source iataFilter.
    ObserverIATAWhitelist []string `json:"observerIATAWhitelist,omitempty"`

    // obsIATAWhitelistCached is the lazily-built uppercase set for O(1) lookups.
    obsIATAWhitelistCached map[string]bool
    obsIATAWhitelistOnce   sync.Once

    // ObserverBlacklist is a list of observer public keys to drop at ingest.
    // Messages from blacklisted observers are silently discarded — no DB writes,
    // no UpsertObserver, no observations, no metrics.

@@ -57,6 +80,23 @@ type Config struct {
// GeoFilterConfig is an alias for the shared geofilter.Config type.
type GeoFilterConfig = geofilter.Config

// ForeignAdvertConfig controls how the ingestor handles ADVERTs whose GPS lies
// outside the configured geofilter polygon (#730). Modes:
//   - "flag" (default): store the advert/node and tag it foreign for visibility.
//   - "drop": silently discard the advert (legacy behavior).
type ForeignAdvertConfig struct {
    Mode string `json:"mode,omitempty"`
}

// IsDropMode reports whether the foreign-advert config is set to "drop".
// Defaults to false ("flag" mode) when nil or unset.
func (f *ForeignAdvertConfig) IsDropMode() bool {
    if f == nil {
        return false
    }
    return strings.EqualFold(strings.TrimSpace(f.Mode), "drop")
}

// RetentionConfig controls how long stale nodes are kept before being moved to inactive_nodes.
type RetentionConfig struct {
    NodeDays int `json:"nodeDays"`

@@ -69,11 +109,8 @@ type MetricsConfig struct {
    SampleIntervalSec int `json:"sampleIntervalSec"`
}

-// DBConfig controls SQLite vacuum and maintenance behavior (#919).
-type DBConfig struct {
-    VacuumOnStartup        bool `json:"vacuumOnStartup"`        // one-time full VACUUM on startup if auto_vacuum is not INCREMENTAL
-    IncrementalVacuumPages int  `json:"incrementalVacuumPages"` // pages returned to OS per reaper cycle (default 1024)
-}
+// DBConfig is the shared SQLite vacuum/maintenance config (#919, #921).
+type DBConfig = dbconfig.DBConfig

// IncrementalVacuumPages returns the configured pages per vacuum or 1024 default.
func (c *Config) IncrementalVacuumPages() int {

@@ -142,6 +179,25 @@ func (c *Config) IsObserverBlacklisted(id string) bool {
    return c.obsBlacklistSetCached[strings.ToLower(strings.TrimSpace(id))]
}

// IsObserverIATAAllowed returns true if the given IATA code is permitted.
// When ObserverIATAWhitelist is empty, all codes are allowed.
func (c *Config) IsObserverIATAAllowed(iata string) bool {
    if c == nil || len(c.ObserverIATAWhitelist) == 0 {
        return true
    }
    c.obsIATAWhitelistOnce.Do(func() {
        m := make(map[string]bool, len(c.ObserverIATAWhitelist))
        for _, code := range c.ObserverIATAWhitelist {
            trimmed := strings.ToUpper(strings.TrimSpace(code))
            if trimmed != "" {
                m[trimmed] = true
            }
        }
        c.obsIATAWhitelistCached = m
    })
    return c.obsIATAWhitelistCached[strings.ToUpper(strings.TrimSpace(iata))]
}

// LoadConfig reads configuration from a JSON file, with env var overrides.
// If the config file does not exist, sensible defaults are used (zero-config startup).
func LoadConfig(path string) (*Config, error) {

@@ -284,3 +284,113 @@ func TestLoadConfigWithAllFields(t *testing.T) {
        t.Errorf("iataFilter=%v", src.IATAFilter)
    }
}

func TestConnectTimeoutOrDefault(t *testing.T) {
    // Default when unset
    s := MQTTSource{}
    if got := s.ConnectTimeoutOrDefault(); got != 30 {
        t.Errorf("default: got %d, want 30", got)
    }

    // Custom value
    s.ConnectTimeoutSec = 5
    if got := s.ConnectTimeoutOrDefault(); got != 5 {
        t.Errorf("custom: got %d, want 5", got)
    }

    // Zero treated as unset
    s.ConnectTimeoutSec = 0
    if got := s.ConnectTimeoutOrDefault(); got != 30 {
        t.Errorf("zero: got %d, want 30", got)
    }
}

func TestConnectTimeoutFromJSON(t *testing.T) {
    dir := t.TempDir()
    cfgPath := dir + "/config.json"
    os.WriteFile(cfgPath, []byte(`{"mqttSources":[{"name":"s1","broker":"tcp://b:1883","topics":["#"],"connectTimeoutSec":5}]}`), 0644)
    cfg, err := LoadConfig(cfgPath)
    if err != nil {
        t.Fatal(err)
    }
    if got := cfg.MQTTSources[0].ConnectTimeoutOrDefault(); got != 5 {
        t.Errorf("from JSON: got %d, want 5", got)
    }
}

func TestObserverIATAWhitelist(t *testing.T) {
    // Config with whitelist set
    cfg := Config{
        ObserverIATAWhitelist: []string{"ARN", "got"},
    }

    // Matching (case-insensitive)
    if !cfg.IsObserverIATAAllowed("ARN") {
        t.Error("ARN should be allowed")
    }
    if !cfg.IsObserverIATAAllowed("arn") {
        t.Error("arn (lowercase) should be allowed")
    }
    if !cfg.IsObserverIATAAllowed("GOT") {
        t.Error("GOT should be allowed")
    }

    // Non-matching
    if cfg.IsObserverIATAAllowed("SJC") {
        t.Error("SJC should NOT be allowed")
    }

    // Empty string not allowed
    if cfg.IsObserverIATAAllowed("") {
        t.Error("empty IATA should NOT be allowed")
    }
}

func TestObserverIATAWhitelistEmpty(t *testing.T) {
    // No whitelist = allow all
    cfg := Config{}
    if !cfg.IsObserverIATAAllowed("SJC") {
        t.Error("with no whitelist, all IATAs should be allowed")
    }
    if !cfg.IsObserverIATAAllowed("") {
        t.Error("with no whitelist, even empty IATA should be allowed")
    }
}

func TestObserverIATAWhitelistJSON(t *testing.T) {
    json := `{
        "dbPath": "test.db",
        "observerIATAWhitelist": ["ARN", "GOT"]
    }`
    tmp := t.TempDir() + "/config.json"
    os.WriteFile(tmp, []byte(json), 0644)
    cfg, err := LoadConfig(tmp)
    if err != nil {
        t.Fatal(err)
    }
    if len(cfg.ObserverIATAWhitelist) != 2 {
        t.Fatalf("expected 2 entries, got %d", len(cfg.ObserverIATAWhitelist))
    }
    if !cfg.IsObserverIATAAllowed("ARN") {
        t.Error("ARN should be allowed after loading from JSON")
    }
}

func TestMQTTSourceRegionField(t *testing.T) {
    dir := t.TempDir()
    cfgPath := filepath.Join(dir, "config.json")
    os.WriteFile(cfgPath, []byte(`{
        "dbPath": "/tmp/test.db",
        "mqttSources": [
            {"name": "cascadia", "broker": "tcp://localhost:1883", "topics": ["meshcore/#"], "region": "PDX"}
        ]
    }`), 0o644)

    cfg, err := LoadConfig(cfgPath)
    if err != nil {
        t.Fatal(err)
    }
    if cfg.MQTTSources[0].Region != "PDX" {
        t.Fatalf("expected region PDX, got %q", cfg.MQTTSources[0].Region)
    }
}

@@ -428,7 +428,12 @@ func TestHandleMessageAdvertGeoFiltered(t *testing.T) {
        topic:   "meshcore/SJC/obs1/packets",
        payload: []byte(`{"raw":"` + rawHex + `"}`),
    }
-   handleMessage(store, "test", source, msg, nil, &Config{GeoFilter: gf})
+   // Legacy silent-drop behavior is now opt-in via ForeignAdverts.Mode="drop"
+   // (#730). The new default — flag — is covered by foreign_advert_test.go.
+   handleMessage(store, "test", source, msg, nil, &Config{
+       GeoFilter:      gf,
+       ForeignAdverts: &ForeignAdvertConfig{Mode: "drop"},
+   })

    // Geo-filtered adverts should not create nodes
    var nodeCount int

@@ -436,7 +441,7 @@
        t.Fatal(err)
    }
    if nodeCount != 0 {
-       t.Errorf("nodes=%d, want 0 (geo-filtered advert should not create node)", nodeCount)
+       t.Errorf("nodes=%d, want 0 (geo-filtered advert in drop mode should not create node)", nodeCount)
    }
}

+260 -9

@@ -8,6 +8,7 @@ import (
    "os"
    "path/filepath"
    "strings"
    "sync"
    "sync/atomic"
    "time"

@@ -24,6 +25,38 @@ type DBStats struct {
    ObserverUpserts atomic.Int64
    WriteErrors     atomic.Int64
    SignatureDrops  atomic.Int64
    // WALCommits tracks every successful tx.Commit() that may have flushed
    // WAL pages.
    WALCommits atomic.Int64
    // BackfillUpdates tracks per-named-backfill row write counts so an
    // infinite-loop backfill (cf #1119) is obvious from the perf page.
    BackfillUpdates sync.Map // name (string) -> *atomic.Int64
}

// IncBackfill increments the backfill counter for the given name, allocating
// the counter on first use.
func (s *DBStats) IncBackfill(name string) {
    v, ok := s.BackfillUpdates.Load(name)
    if !ok {
        nc := new(atomic.Int64)
        actual, loaded := s.BackfillUpdates.LoadOrStore(name, nc)
        if loaded {
            v = actual
        } else {
            v = nc
        }
    }
    v.(*atomic.Int64).Add(1)
}

// SnapshotBackfills returns a name->count copy of all backfill counters.
func (s *DBStats) SnapshotBackfills() map[string]int64 {
    out := make(map[string]int64)
    s.BackfillUpdates.Range(func(k, v interface{}) bool {
        out[k.(string)] = v.(*atomic.Int64).Load()
        return true
    })
    return out
}

// Store wraps the SQLite database for packet ingestion.

@@ -44,6 +77,7 @@ type Store struct {
    stmtUpsertMetrics *sql.Stmt

    sampleIntervalSec int
    backfillWg        sync.WaitGroup
}

// OpenStore opens or creates a SQLite DB at the given path, applying the

@@ -99,7 +133,8 @@ func applySchema(db *sql.DB) error {
        first_seen TEXT,
        advert_count INTEGER DEFAULT 0,
        battery_mv INTEGER,
-       temperature_c REAL
+       temperature_c REAL,
+       foreign_advert INTEGER DEFAULT 0
    );

    CREATE TABLE IF NOT EXISTS observers (

@@ -116,7 +151,8 @@ func applySchema(db *sql.DB) error {
        battery_mv INTEGER,
        uptime_secs INTEGER,
        noise_floor REAL,
-       inactive INTEGER DEFAULT 0
+       inactive INTEGER DEFAULT 0,
+       last_packet_at TEXT DEFAULT NULL
    );

    CREATE INDEX IF NOT EXISTS idx_nodes_last_seen ON nodes(last_seen);

@@ -132,7 +168,8 @@ func applySchema(db *sql.DB) error {
        first_seen TEXT,
        advert_count INTEGER DEFAULT 0,
        battery_mv INTEGER,
-       temperature_c REAL
+       temperature_c REAL,
+       foreign_advert INTEGER DEFAULT 0
    );

    CREATE INDEX IF NOT EXISTS idx_inactive_nodes_last_seen ON inactive_nodes(last_seen);

@@ -146,12 +183,15 @@ func applySchema(db *sql.DB) error {
        payload_type INTEGER,
        payload_version INTEGER,
        decoded_json TEXT,
        from_pubkey TEXT,
        created_at TEXT DEFAULT (datetime('now'))
    );

    CREATE INDEX IF NOT EXISTS idx_transmissions_hash ON transmissions(hash);
    CREATE INDEX IF NOT EXISTS idx_transmissions_first_seen ON transmissions(first_seen);
    CREATE INDEX IF NOT EXISTS idx_transmissions_payload_type ON transmissions(payload_type);
    -- idx_transmissions_from_pubkey is created by the from_pubkey_v1
    -- migration after the column is added on legacy DBs (#1143).
    `
    if _, err := db.Exec(schema); err != nil {
        return fmt.Errorf("base schema: %w", err)

@@ -213,11 +253,16 @@
    row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'advert_count_unique_v1'")
    if row.Scan(&migDone) != nil {
        log.Println("[migration] Recalculating advert_count (unique transmissions only)...")
        // Note: this migration is gated on a one-shot _migrations row, so it
        // runs at most once per DB. The historical version used a LIKE-on-JSON
        // substring match (#1143). Switching to from_pubkey here is safe even
        // though the column may not yet be backfilled on legacy DBs: the
        // migration is already marked done on those DBs and won't re-run.
        db.Exec(`
            UPDATE nodes SET advert_count = (
                SELECT COUNT(*) FROM transmissions t
                WHERE t.payload_type = 4
-               AND t.decoded_json LIKE '%' || nodes.public_key || '%'
+               AND t.from_pubkey = nodes.public_key
            )
        `)
        db.Exec(`INSERT INTO _migrations (name) VALUES ('advert_count_unique_v1')`)

@@ -421,6 +466,82 @@ func applySchema(db *sql.DB) error {
        log.Println("[migration] observations.raw_hex column added")
    }

    // Migration: add last_packet_at column to observers (#last-packet-at)
    row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observers_last_packet_at_v1'")
    if row.Scan(&migDone) != nil {
        log.Println("[migration] Adding last_packet_at column to observers...")
        _, alterErr := db.Exec(`ALTER TABLE observers ADD COLUMN last_packet_at TEXT DEFAULT NULL`)
        if alterErr != nil && !strings.Contains(alterErr.Error(), "duplicate column") {
            return fmt.Errorf("observers last_packet_at ALTER: %w", alterErr)
        }
        // Backfill: set last_packet_at = last_seen only for observers that actually have
        // observation rows (packet_count alone is unreliable — UpsertObserver sets it to 1
        // on INSERT even for status-only observers).
        res, err := db.Exec(`UPDATE observers SET last_packet_at = last_seen
            WHERE last_packet_at IS NULL
            AND rowid IN (SELECT DISTINCT observer_idx FROM observations WHERE observer_idx IS NOT NULL)`)
        if err == nil {
            n, _ := res.RowsAffected()
            log.Printf("[migration] Backfilled last_packet_at for %d observers with packets", n)
        }
        db.Exec(`INSERT INTO _migrations (name) VALUES ('observers_last_packet_at_v1')`)
        log.Println("[migration] observers.last_packet_at column added")
    }

    // Migration: backfill observations.path_json from raw_hex (#888)
    // NOTE: This runs ASYNC via BackfillPathJSONAsync() to avoid blocking MQTT startup.
    // See staging outage where ~502K rows blocked ingest for 15+ hours.

    // One-time cleanup: delete legacy packets with empty hash or empty first_seen (#994)
    row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'cleanup_legacy_null_hash_ts'")
    if row.Scan(&migDone) != nil {
        log.Println("[migration] Cleaning up legacy packets with empty hash/timestamp...")
        db.Exec(`DELETE FROM observations WHERE transmission_id IN (SELECT id FROM transmissions WHERE hash = '' OR first_seen = '')`)
        res, err := db.Exec(`DELETE FROM transmissions WHERE hash = '' OR first_seen = ''`)
        if err == nil {
            deleted, _ := res.RowsAffected()
            log.Printf("[migration] deleted %d legacy packets with empty hash/timestamp", deleted)
        }
        db.Exec(`INSERT INTO _migrations (name) VALUES ('cleanup_legacy_null_hash_ts')`)
    }

    // Migration: foreign_advert column on nodes/inactive_nodes (#730)
    // Marks nodes whose ADVERT GPS lies outside the configured geofilter polygon.
    // Default 0; set to 1 by the ingestor when GeoFilter is configured and
    // PassesFilter() returns false. Allows operators to surface bridged/leaked
    // adverts without silently dropping them.
    row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'foreign_advert_v1'")
    if row.Scan(&migDone) != nil {
        log.Println("[migration] Adding foreign_advert column to nodes/inactive_nodes...")
        if _, err := db.Exec(`ALTER TABLE nodes ADD COLUMN foreign_advert INTEGER DEFAULT 0`); err != nil {
            log.Printf("[migration] nodes.foreign_advert: %v (may already exist)", err)
        }
        if _, err := db.Exec(`ALTER TABLE inactive_nodes ADD COLUMN foreign_advert INTEGER DEFAULT 0`); err != nil {
            log.Printf("[migration] inactive_nodes.foreign_advert: %v (may already exist)", err)
        }
        db.Exec(`CREATE INDEX IF NOT EXISTS idx_nodes_foreign_advert ON nodes(foreign_advert) WHERE foreign_advert = 1`)
        db.Exec(`INSERT INTO _migrations (name) VALUES ('foreign_advert_v1')`)
        log.Println("[migration] foreign_advert column added")
    }

    // Migration: from_pubkey column on transmissions (#1143).
    // Replaces the unsound `decoded_json LIKE '%pubkey%'` attribution path with
    // an exact-match indexed column. Synchronously adds the column + index;
    // row-level backfill is run by the SERVER asynchronously
    // (cmd/server/from_pubkey_migration.go) so we don't block ingestor boot.
    row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'from_pubkey_v1'")
    if row.Scan(&migDone) != nil {
        log.Println("[migration] Adding from_pubkey column + index to transmissions (#1143)...")
        if _, err := db.Exec(`ALTER TABLE transmissions ADD COLUMN from_pubkey TEXT`); err != nil {
            log.Printf("[migration] transmissions.from_pubkey: %v (may already exist)", err)
        }
        if _, err := db.Exec(`CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey)`); err != nil {
            log.Printf("[migration] idx_transmissions_from_pubkey: %v", err)
        }
        db.Exec(`INSERT INTO _migrations (name) VALUES ('from_pubkey_v1')`)
        log.Println("[migration] from_pubkey column + index added")
    }

    return nil
}

@@ -433,8 +554,8 @@ func (s *Store) prepareStatements() error {
    }

    s.stmtInsertTransmission, err = s.db.Prepare(`
-       INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json, channel_hash)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+       INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json, channel_hash, from_pubkey)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    `)
    if err != nil {
        return err

@@ -504,7 +625,7 @@
        return err
    }

-   s.stmtUpdateObserverLastSeen, err = s.db.Prepare("UPDATE observers SET last_seen = ? WHERE rowid = ?")
+   s.stmtUpdateObserverLastSeen, err = s.db.Prepare("UPDATE observers SET last_seen = ?, last_packet_at = ? WHERE rowid = ?")
    if err != nil {
        return err
    }

@@ -563,6 +684,7 @@ func (s *Store) InsertTransmission(data *PacketData) (bool, error) {
        data.RawHex, hash, now,
        data.RouteType, data.PayloadType, data.PayloadVersion,
        data.DecodedJSON, nilIfEmpty(data.ChannelHash),
        nilIfEmpty(data.FromPubkey),
    )
    if err != nil {
        s.Stats.WriteErrors.Add(1)

@@ -583,9 +705,9 @@
    err := s.stmtGetObserverRowid.QueryRow(data.ObserverID).Scan(&rowid)
    if err == nil {
        observerIdx = &rowid
-       // Update observer last_seen on every packet to prevent
+       // Update observer last_seen and last_packet_at on every packet to prevent
        // low-traffic observers from appearing offline (#463)
-       _, _ = s.stmtUpdateObserverLastSeen.Exec(now, rowid)
+       _, _ = s.stmtUpdateObserverLastSeen.Exec(now, now, rowid)
    }
}

@@ -607,6 +729,10 @@
        s.Stats.ObservationsInserted.Add(1)
    }

    // Each prepared-stmt Exec auto-commits. Count one WAL commit per
    // successful InsertTransmission so the perf page sees commit pressure.
    s.Stats.WALCommits.Add(1)

    return isNew, nil
}

@@ -634,6 +760,21 @@ func (s *Store) IncrementAdvertCount(pubKey string) error {
    return err
}

// MarkNodeForeign sets foreign_advert=1 on the node row identified by pubKey.
// Used when an ADVERT arrives whose GPS lies outside the configured geofilter
// polygon (#730). Idempotent — safe to call repeatedly. No-op if pubKey is
// empty.
func (s *Store) MarkNodeForeign(pubKey string) error {
    if pubKey == "" {
        return nil
    }
    _, err := s.db.Exec(`UPDATE nodes SET foreign_advert = 1 WHERE public_key = ?`, pubKey)
    if err != nil {
        s.Stats.WriteErrors.Add(1)
    }
    return err
}

// UpdateNodeTelemetry updates battery and temperature for a node.
func (s *Store) UpdateNodeTelemetry(pubKey string, batteryMv *int, temperatureC *float64) error {
    var bv, tc interface{}

@@ -714,6 +855,7 @@ func (s *Store) UpsertObserver(id, name, iata string, meta *ObserverMeta) error

// Close checkpoints the WAL and closes the database.
func (s *Store) Close() error {
    s.backfillWg.Wait()
    s.Checkpoint()
    return s.db.Close()
}

@@ -853,6 +995,97 @@ func (s *Store) Checkpoint() {
    }
}

// BackfillPathJSONAsync launches the path_json backfill in a background goroutine.
// It processes observations with NULL/empty path_json that have raw_hex available,
// decoding hop paths and updating the column. Safe to run concurrently with ingest
// because new observations get path_json at write time; this only touches NULL rows.
// Idempotent: skips if migration already recorded.
func (s *Store) BackfillPathJSONAsync() {
    s.backfillWg.Add(1)
    go func() {
        defer s.backfillWg.Done()
        defer func() {
            if r := recover(); r != nil {
                log.Printf("[backfill] path_json async panic recovered: %v", r)
            }
        }()

        var migDone int
        row := s.db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'")
        if row.Scan(&migDone) == nil {
            return // already done
        }

        log.Println("[backfill] Starting async path_json backfill from raw_hex...")
        updated := 0
        errored := false
        const batchSize = 1000
        batchNum := 0
        for {
            rows, err := s.db.Query(`
                SELECT o.id, o.raw_hex
                FROM observations o
                JOIN transmissions t ON o.transmission_id = t.id
                WHERE o.raw_hex IS NOT NULL AND o.raw_hex != ''
                -- NB: '[]' is the "already attempted, no hops" sentinel; excluded
                -- to prevent the infinite re-UPDATE loop fixed in #1119.
                AND (o.path_json IS NULL OR o.path_json = '')
                AND t.payload_type != 9
                LIMIT ?`, batchSize)
            if err != nil {
                log.Printf("[backfill] path_json query error: %v", err)
                errored = true
                break
            }
            type pendingRow struct {
                id     int64
                rawHex string
            }
            var batch []pendingRow
            for rows.Next() {
                var r pendingRow
                if err := rows.Scan(&r.id, &r.rawHex); err == nil {
                    batch = append(batch, r)
                }
            }
            rows.Close()
            if len(batch) == 0 {
                break
            }
            for _, r := range batch {
                hops, err := packetpath.DecodePathFromRawHex(r.rawHex)
                if err != nil || len(hops) == 0 {
                    if _, execErr := s.db.Exec(`UPDATE observations SET path_json = '[]' WHERE id = ?`, r.id); execErr != nil {
                        log.Printf("[backfill] write error (id=%d): %v", r.id, execErr)
                    } else {
                        s.Stats.IncBackfill("path_json")
                    }
                    continue
                }
                b, _ := json.Marshal(hops)
                if _, execErr := s.db.Exec(`UPDATE observations SET path_json = ? WHERE id = ?`, string(b), r.id); execErr != nil {
                    log.Printf("[backfill] write error (id=%d): %v", r.id, execErr)
                } else {
                    updated++
                    s.Stats.IncBackfill("path_json")
                }
            }
            batchNum++
            if batchNum%50 == 0 {
                log.Printf("[backfill] progress: %d observations updated so far (%d batches)", updated, batchNum)
            }
            // Throttle: yield to ingest writers between batches
            time.Sleep(50 * time.Millisecond)
        }
        log.Printf("[backfill] Async path_json backfill complete: %d observations updated", updated)
        if !errored {
            s.db.Exec(`INSERT INTO _migrations (name) VALUES ('backfill_path_json_from_raw_hex_v1')`)
        } else {
            log.Printf("[backfill] NOT recording migration due to errors — will retry on next restart")
        }
    }()
}

// LogStats logs current operational metrics.
func (s *Store) LogStats() {
    log.Printf("[stats] tx_inserted=%d tx_dupes=%d obs_inserted=%d node_upserts=%d observer_upserts=%d write_errors=%d sig_drops=%d",

@@ -976,6 +1209,9 @@ type PacketData struct {
    PathJSON    string
    DecodedJSON string
    ChannelHash string // grouping key for channel queries (#762)
    Region      string // observer region: payload > topic > source config (#788)
    Foreign     bool   // true when ADVERT GPS lies outside configured geofilter (#730)
    FromPubkey  string // pubkey of the originating node, for exact-match attribution (#1143)
}

// nilIfEmpty returns nil for empty strings (for nullable DB columns).

@@ -994,6 +1230,7 @@ type MQTTPacketMessage struct {
    Score     *float64 `json:"score"`
    Direction *string  `json:"direction"`
    Origin    string   `json:"origin"`
    Region    string   `json:"region,omitempty"` // optional region override (#788)
}

// BuildPacketData constructs a PacketData from a decoded packet and MQTT message.

@@ -1033,6 +1270,13 @@ func BuildPacketData(msg *MQTTPacketMessage, decoded *DecodedPacket, observerID,
        DecodedJSON: PayloadJSON(&decoded.Payload),
    }

    // Region priority: payload field > topic-derived parameter (#788)
    if msg.Region != "" {
        pd.Region = msg.Region
    } else {
        pd.Region = region
    }

    // Populate channel_hash for fast channel queries (#762)
    if decoded.Header.PayloadType == PayloadGRP_TXT {
        if decoded.Payload.Type == "CHAN" && decoded.Payload.Channel != "" {

@@ -1042,5 +1286,12 @@
        }
    }

    // Populate from_pubkey at write time (#1143). ADVERTs carry the
    // originating node's pubkey directly; other packet types stay NULL
    // (downstream attribution queries handle NULL gracefully).
    if decoded.Header.PayloadType == PayloadADVERT && decoded.Payload.PubKey != "" {
        pd.FromPubkey = decoded.Payload.PubKey
    }

    return pd
}

@@ -569,6 +569,61 @@ func TestInsertTransmissionUpdatesObserverLastSeen(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLastPacketAtUpdatedOnPacketOnly(t *testing.T) {
|
||||
s, err := OpenStore(tempDBPath(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
// Insert observer via status path — last_packet_at should be NULL
|
||||
if err := s.UpsertObserver("obs1", "Observer1", "SJC", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var lastPacketAt sql.NullString
|
||||
s.db.QueryRow("SELECT last_packet_at FROM observers WHERE id = ?", "obs1").Scan(&lastPacketAt)
|
||||
if lastPacketAt.Valid {
|
||||
t.Fatalf("expected last_packet_at to be NULL after UpsertObserver, got %s", lastPacketAt.String)
|
||||
}
|
||||
|
||||
// Insert a packet from this observer — last_packet_at should be set
|
||||
data := &PacketData{
|
||||
RawHex: "0A00D69F",
|
||||
Timestamp: "2026-04-24T12:00:00Z",
|
||||
ObserverID: "obs1",
|
||||
Hash: "lastpackettest123456",
|
||||
RouteType: 2,
|
||||
PayloadType: 2,
|
||||
PathJSON: "[]",
|
||||
DecodedJSON: `{"type":"TXT_MSG"}`,
|
||||
}
|
||||
if _, err := s.InsertTransmission(data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s.db.QueryRow("SELECT last_packet_at FROM observers WHERE id = ?", "obs1").Scan(&lastPacketAt)
|
||||
if !lastPacketAt.Valid {
|
||||
t.Fatal("expected last_packet_at to be non-NULL after InsertTransmission")
|
||||
}
|
||||
// InsertTransmission uses `now = data.Timestamp || time.Now()`, so last_packet_at
|
||||
// should match the packet's Timestamp when provided (same source-of-truth as last_seen).
|
||||
if lastPacketAt.String != "2026-04-24T12:00:00Z" {
|
||||
t.Errorf("expected last_packet_at=2026-04-24T12:00:00Z, got %s", lastPacketAt.String)
|
||||
}
|
||||
|
||||
// UpsertObserver again (status path) — last_packet_at should NOT change
|
||||
if err := s.UpsertObserver("obs1", "Observer1", "SJC", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var lastPacketAtAfterStatus sql.NullString
|
||||
s.db.QueryRow("SELECT last_packet_at FROM observers WHERE id = ?", "obs1").Scan(&lastPacketAtAfterStatus)
|
||||
if !lastPacketAtAfterStatus.Valid || lastPacketAtAfterStatus.String != lastPacketAt.String {
|
||||
t.Errorf("UpsertObserver should not change last_packet_at; expected %s, got %v", lastPacketAt.String, lastPacketAtAfterStatus)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndToEndIngest(t *testing.T) {
|
||||
s, err := OpenStore(tempDBPath(t))
|
||||
if err != nil {
|
||||
@@ -2123,3 +2178,543 @@ func TestBuildPacketData_NonTracePathJSON(t *testing.T) {
|
||||
t.Errorf("path_json = %s, want %s", pd.PathJSON, expectedPathJSON)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Issue #888: Backfill path_json from raw_hex ---
|
||||
|
||||
func TestBackfillPathJsonFromRawHex(t *testing.T) {
|
||||
dbPath := tempDBPath(t)
|
||||
s, err := OpenStore(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Insert a transmission with payload_type != TRACE (e.g. 0x01)
|
||||
// raw_hex: header 0x05 (route FLOOD, payload 0x01), path byte 0x42 (hash_size=2, count=2),
|
||||
// hops: AABB, CCDD, then some payload bytes
|
||||
rawHex := "0542AABBCCDD0000000000000000000000000000"
|
||||
s.db.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, payload_type) VALUES (?, 'h1', '2025-01-01T00:00:00Z', 1)`, rawHex)
|
||||
|
||||
// Insert observation with raw_hex but empty path_json
|
||||
s.db.Exec(`INSERT INTO observations (transmission_id, timestamp, raw_hex, path_json) VALUES (1, 1000, ?, '[]')`, rawHex)
|
||||
// Insert observation with raw_hex and NULL path_json
|
||||
s.db.Exec(`INSERT INTO observations (transmission_id, timestamp, raw_hex, path_json) VALUES (1, 1001, ?, NULL)`, rawHex)
|
||||
// Insert observation with existing path_json (should NOT be overwritten)
|
||||
s.db.Exec(`INSERT INTO observations (transmission_id, timestamp, raw_hex, path_json) VALUES (1, 1002, ?, '["XX","YY"]')`, rawHex)
|
||||
|
||||
// Insert a TRACE transmission (payload_type = 0x09) — should be skipped
|
||||
traceRaw := "2604302D0D2359FEE7B100000000006733D63367"
|
||||
s.db.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, payload_type) VALUES (?, 'h2', '2025-01-01T00:00:00Z', 9)`, traceRaw)
|
||||
s.db.Exec(`INSERT INTO observations (transmission_id, timestamp, raw_hex, path_json) VALUES (2, 1003, ?, '[]')`, traceRaw)
|
||||
|
||||
// Remove the migration marker so it runs again on reopen
|
||||
s.db.Exec(`DELETE FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'`)
|
||||
s.Close()
|
||||
|
||||
// Reopen — backfill is now async, must trigger explicitly
|
||||
s2, err := OpenStore(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s2.Close()
|
||||
|
||||
// Trigger async backfill and wait for completion
|
||||
s2.BackfillPathJSONAsync()
|
||||
deadline := time.Now().Add(10 * time.Second)
|
||||
var migCount int
|
||||
for time.Now().Before(deadline) {
|
||||
s2.db.QueryRow("SELECT COUNT(*) FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'").Scan(&migCount)
|
||||
if migCount == 1 {
|
||||
break
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
if migCount != 1 {
|
||||
t.Fatalf("migration not recorded")
|
||||
}
|
||||
|
||||
// Row 1 (was '[]') is NOT re-processed by the backfill — '[]' means
|
||||
// "already attempted, no hops" and is excluded by the WHERE to avoid the
|
||||
// infinite-loop bug fixed in #1119. It must remain '[]'.
|
||||
var pj1 string
|
||||
s2.db.QueryRow("SELECT path_json FROM observations WHERE id = 1").Scan(&pj1)
|
||||
if pj1 != "[]" {
|
||||
t.Errorf("row 1 path_json = %q, want %q (must not re-process '[]' rows after #1119)", pj1, "[]")
|
||||
}
|
||||
|
||||
// Row 2 (was NULL) should now have decoded hops
|
||||
var pj2 string
|
||||
s2.db.QueryRow("SELECT path_json FROM observations WHERE id = 2").Scan(&pj2)
|
||||
if pj2 != `["AABB","CCDD"]` {
|
||||
t.Errorf("row 2 path_json = %q, want %q", pj2, `["AABB","CCDD"]`)
|
||||
}
|
||||
|
||||
// Row 3 (had existing data) should NOT be overwritten
|
||||
var pj3 string
|
||||
s2.db.QueryRow("SELECT path_json FROM observations WHERE id = 3").Scan(&pj3)
|
||||
if pj3 != `["XX","YY"]` {
|
||||
t.Errorf("row 3 path_json = %q, want %q (should not be overwritten)", pj3, `["XX","YY"]`)
|
||||
}
|
||||
|
||||
// Row 4 (TRACE) should NOT be updated
|
||||
var pj4 string
|
||||
s2.db.QueryRow("SELECT path_json FROM observations WHERE id = 4").Scan(&pj4)
|
||||
if pj4 != "[]" {
|
||||
t.Errorf("row 4 (TRACE) path_json = %q, want %q (should be skipped)", pj4, "[]")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupLegacyNullHashTimestamp(t *testing.T) {
|
||||
path := tempDBPath(t)
|
||||
|
||||
// Create a bare-bones DB with legacy bad data
|
||||
db, err := sql.Open("sqlite", path+"?_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.Exec(`CREATE TABLE IF NOT EXISTS transmissions (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
raw_hex TEXT NOT NULL,
|
||||
hash TEXT NOT NULL,
|
||||
first_seen TEXT NOT NULL,
|
||||
route_type INTEGER,
|
||||
payload_type INTEGER,
|
||||
payload_version INTEGER,
|
||||
decoded_json TEXT,
|
||||
created_at TEXT DEFAULT (datetime('now')),
|
||||
channel_hash TEXT DEFAULT NULL
|
||||
)`)
|
||||
db.Exec(`CREATE TABLE IF NOT EXISTS observations (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
		transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
		observer_idx INTEGER,
		direction TEXT,
		snr REAL,
		rssi REAL,
		score INTEGER,
		path_json TEXT,
		timestamp INTEGER NOT NULL
	)`)
	db.Exec(`CREATE TABLE IF NOT EXISTS _migrations (name TEXT PRIMARY KEY)`)
	db.Exec(`CREATE TABLE IF NOT EXISTS nodes (public_key TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL, last_seen TEXT, first_seen TEXT, advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL)`)
	db.Exec(`CREATE TABLE IF NOT EXISTS observers (id TEXT PRIMARY KEY, name TEXT, iata TEXT, last_seen TEXT, first_seen TEXT, packet_count INTEGER DEFAULT 0, model TEXT, firmware TEXT, client_version TEXT, radio TEXT, battery_mv INTEGER, uptime_secs INTEGER, noise_floor REAL, inactive INTEGER DEFAULT 0, last_packet_at TEXT DEFAULT NULL)`)

	// Insert good transmission
	db.Exec(`INSERT INTO transmissions (id, raw_hex, hash, first_seen) VALUES (1, 'aabb', 'abc123', '2024-01-01T00:00:00Z')`)
	db.Exec(`INSERT INTO observations (transmission_id, observer_idx, timestamp) VALUES (1, 1, 1704067200)`)

	// Insert bad: empty hash
	db.Exec(`INSERT INTO transmissions (id, raw_hex, hash, first_seen) VALUES (2, 'ccdd', '', '2024-01-01T00:00:00Z')`)
	db.Exec(`INSERT INTO observations (transmission_id, observer_idx, timestamp) VALUES (2, 1, 1704067200)`)

	// Insert bad: empty first_seen
	db.Exec(`INSERT INTO transmissions (id, raw_hex, hash, first_seen) VALUES (3, 'eeff', 'def456', '')`)
	db.Exec(`INSERT INTO observations (transmission_id, observer_idx, timestamp) VALUES (3, 2, 1704067200)`)

	db.Close()

	// Now open via OpenStore, which should run the migration
	s, err := OpenStore(path)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Good transmission should remain
	var count int
	s.db.QueryRow("SELECT COUNT(*) FROM transmissions WHERE id = 1").Scan(&count)
	if count != 1 {
		t.Error("good transmission should not be deleted")
	}

	// Bad transmissions should be gone
	s.db.QueryRow("SELECT COUNT(*) FROM transmissions WHERE id = 2").Scan(&count)
	if count != 0 {
		t.Errorf("transmission with empty hash should be deleted, got count=%d", count)
	}
	s.db.QueryRow("SELECT COUNT(*) FROM transmissions WHERE id = 3").Scan(&count)
	if count != 0 {
		t.Errorf("transmission with empty first_seen should be deleted, got count=%d", count)
	}

	// Observations for bad transmissions should be gone
	s.db.QueryRow("SELECT COUNT(*) FROM observations WHERE transmission_id IN (2, 3)").Scan(&count)
	if count != 0 {
		t.Errorf("observations for bad transmissions should be deleted, got count=%d", count)
	}

	// Observation for good transmission should remain
	s.db.QueryRow("SELECT COUNT(*) FROM observations WHERE transmission_id = 1").Scan(&count)
	if count != 1 {
		t.Error("observation for good transmission should remain")
	}

	// Migration marker should exist
	var migCount int
	s.db.QueryRow("SELECT COUNT(*) FROM _migrations WHERE name = 'cleanup_legacy_null_hash_ts'").Scan(&migCount)
	if migCount != 1 {
		t.Error("migration marker cleanup_legacy_null_hash_ts should be recorded")
	}

	// Idempotent: opening again should not error
	s.Close()
	s2, err := OpenStore(path)
	if err != nil {
		t.Fatal("second open should not fail:", err)
	}
	s2.Close()
}
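
// A hedged sketch of the cleanup this test exercises; the real migration runs
// inside OpenStore and, per the marker name ("null_hash_ts"), may also match
// NULL values. Shown only to make the expected effect concrete: purge legacy
// transmissions with an empty hash or first_seen, cascade to their
// observations, then record the marker exactly once.
func cleanupLegacySketch(db *sql.DB) error {
	stmts := []string{
		`DELETE FROM observations WHERE transmission_id IN
		    (SELECT id FROM transmissions WHERE hash = '' OR hash IS NULL OR first_seen = '' OR first_seen IS NULL)`,
		`DELETE FROM transmissions WHERE hash = '' OR hash IS NULL OR first_seen = '' OR first_seen IS NULL`,
		`INSERT OR IGNORE INTO _migrations (name) VALUES ('cleanup_legacy_null_hash_ts')`,
	}
	for _, s := range stmts {
		if _, err := db.Exec(s); err != nil {
			return err
		}
	}
	return nil
}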

func TestBuildPacketDataRegionFromPayload(t *testing.T) {
	msg := &MQTTPacketMessage{Raw: "0102030405060708", Region: "PDX"}
	decoded := &DecodedPacket{
		Header: Header{RouteType: 1, PayloadType: 3},
	}
	pkt := BuildPacketData(msg, decoded, "obs1", "SJC")
	// When payload has region, it should override the topic-derived region
	if pkt.Region != "PDX" {
		t.Fatalf("expected region PDX from payload, got %q", pkt.Region)
	}
}

func TestBuildPacketDataRegionFallsBackToTopic(t *testing.T) {
	msg := &MQTTPacketMessage{Raw: "0102030405060708"}
	decoded := &DecodedPacket{
		Header: Header{RouteType: 1, PayloadType: 3},
	}
	pkt := BuildPacketData(msg, decoded, "obs1", "SJC")
	if pkt.Region != "SJC" {
		t.Fatalf("expected region SJC from topic, got %q", pkt.Region)
	}
}

// TestBackfillPathJSONAsync verifies that the path_json backfill does NOT block
// OpenStore from returning. MQTT connect happens immediately after OpenStore;
// if the backfill is synchronous, MQTT would be delayed indefinitely on large DBs.
// This test creates pending backfill rows, opens the store, and asserts that
// OpenStore returns before the migration is recorded — proving async execution.
func TestBackfillPathJSONAsync(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "async_test.db")

	// Bootstrap schema manually so we can insert test data BEFORE OpenStore
	db, err := sql.Open("sqlite", dbPath+"?_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
	if err != nil {
		t.Fatal(err)
	}
	// Create tables manually (minimal schema for this test)
	_, err = db.Exec(`
	CREATE TABLE _migrations (name TEXT PRIMARY KEY);
	CREATE TABLE transmissions (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		raw_hex TEXT NOT NULL,
		hash TEXT NOT NULL UNIQUE,
		first_seen TEXT NOT NULL,
		route_type INTEGER,
		payload_type INTEGER,
		payload_version INTEGER,
		decoded_json TEXT,
		created_at TEXT DEFAULT (datetime('now')),
		channel_hash TEXT
	);
	CREATE TABLE observers (
		id TEXT PRIMARY KEY,
		name TEXT,
		iata TEXT,
		last_seen TEXT,
		first_seen TEXT,
		packet_count INTEGER DEFAULT 0,
		model TEXT,
		firmware TEXT,
		client_version TEXT,
		radio TEXT,
		battery_mv INTEGER,
		uptime_secs INTEGER,
		noise_floor REAL,
		inactive INTEGER DEFAULT 0,
		last_packet_at TEXT
	);
	CREATE TABLE nodes (
		public_key TEXT PRIMARY KEY,
		name TEXT, role TEXT, lat REAL, lon REAL,
		last_seen TEXT, first_seen TEXT, advert_count INTEGER DEFAULT 0,
		battery_mv INTEGER, temperature_c REAL
	);
	CREATE TABLE inactive_nodes (
		public_key TEXT PRIMARY KEY,
		name TEXT, role TEXT, lat REAL, lon REAL,
		last_seen TEXT, first_seen TEXT, advert_count INTEGER DEFAULT 0,
		battery_mv INTEGER, temperature_c REAL
	);
	CREATE TABLE observations (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
		observer_idx INTEGER,
		direction TEXT,
		snr REAL, rssi REAL, score INTEGER,
		path_json TEXT,
		timestamp INTEGER NOT NULL,
		raw_hex TEXT
	);
	CREATE UNIQUE INDEX idx_observations_dedup ON observations(transmission_id, observer_idx, COALESCE(path_json, ''));
	CREATE INDEX idx_observations_transmission_id ON observations(transmission_id);
	CREATE INDEX idx_observations_observer_idx ON observations(observer_idx);
	CREATE INDEX idx_observations_timestamp ON observations(timestamp);
	CREATE TABLE observer_metrics (
		observer_id TEXT NOT NULL,
		timestamp TEXT NOT NULL,
		noise_floor REAL, tx_air_secs INTEGER, rx_air_secs INTEGER,
		recv_errors INTEGER, battery_mv INTEGER,
		packets_sent INTEGER, packets_recv INTEGER,
		PRIMARY KEY (observer_id, timestamp)
	);
	CREATE TABLE dropped_packets (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		hash TEXT, raw_hex TEXT, reason TEXT NOT NULL,
		observer_id TEXT, observer_name TEXT,
		node_pubkey TEXT, node_name TEXT,
		dropped_at DATETIME DEFAULT CURRENT_TIMESTAMP
	);
	`)
	if err != nil {
		t.Fatal("bootstrap schema:", err)
	}

	// Mark all migrations as done EXCEPT the path_json backfill
	for _, m := range []string{
		"advert_count_unique_v1", "noise_floor_real_v1", "node_telemetry_v1",
		"obs_timestamp_index_v1", "observer_metrics_v1", "observer_metrics_ts_idx",
		"observers_inactive_v1", "observer_metrics_packets_v1", "channel_hash_v1",
		"dropped_packets_v1", "observations_raw_hex_v1", "observers_last_packet_at_v1",
		"cleanup_legacy_null_hash_ts",
	} {
		db.Exec(`INSERT INTO _migrations (name) VALUES (?)`, m)
	}

	// Insert a transmission + observations with NULL path_json and valid raw_hex.
	// raw_hex "41020304AABBCCDD05060708" carries a 2-hop path decodable by packetpath.
	rawHex := "41020304AABBCCDD05060708"
	_, err = db.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, payload_type) VALUES (?, 'hash1', '2025-01-01T00:00:00Z', 4)`, rawHex)
	if err != nil {
		t.Fatal("insert tx:", err)
	}
	// Insert 100 observations needing backfill
	for i := 0; i < 100; i++ {
		_, err = db.Exec(`INSERT INTO observations (transmission_id, observer_idx, timestamp, raw_hex, path_json) VALUES (1, ?, ?, ?, NULL)`,
			i+1, 1700000000+i, rawHex)
		if err != nil {
			// the dedup index might fire; hence the unique observer_idx per row
			t.Fatalf("insert obs %d: %v", i, err)
		}
	}
	db.Close()

	// Now open the store via OpenStore — this must return QUICKLY (non-blocking)
	start := time.Now()
	store, err := OpenStoreWithInterval(dbPath, 300)
	elapsed := time.Since(start)
	if err != nil {
		t.Fatal("OpenStore:", err)
	}
	defer store.Close()

	// OpenStore must return in under 2 seconds (backfill is no longer in applySchema)
	if elapsed > 2*time.Second {
		t.Fatalf("OpenStore blocked for %v — backfill must not run in applySchema", elapsed)
	}

	// Backfill must NOT be recorded yet — it hasn't been triggered
	var done int
	err = store.db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'").Scan(&done)
	if err == nil {
		t.Fatal("migration recorded during OpenStore — backfill must be async via BackfillPathJSONAsync()")
	}

	// Now trigger the async backfill (simulates what main.go does after OpenStore)
	store.BackfillPathJSONAsync()

	// Wait for the backfill to complete (should be very fast with 100 rows)
	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		err = store.db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'").Scan(&done)
		if err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		t.Fatal("backfill never completed within 10s")
	}

	// Verify the backfill actually worked — observations should have non-NULL path_json
	var nullCount int
	store.db.QueryRow("SELECT COUNT(*) FROM observations WHERE path_json IS NULL").Scan(&nullCount)
	if nullCount > 0 {
		t.Errorf("backfill left %d observations with NULL path_json", nullCount)
	}
}

// TestBackfillPathJSONAsyncMethodExists verifies the async backfill API surface
// exists — BackfillPathJSONAsync must be callable independently from OpenStore.
func TestBackfillPathJSONAsyncMethodExists(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "method_test.db")
	store, err := OpenStoreWithInterval(dbPath, 300)
	if err != nil {
		t.Fatal(err)
	}
	defer store.Close()

	// BackfillPathJSONAsync must exist as a method on *Store.
	// This is a compile-time check — if the method doesn't exist, the test won't compile.
	store.BackfillPathJSONAsync()
}

// TestBackfillPathJSONAsync_BracketRowsTerminate exercises the infinite-loop bug
// from issue #1119. Observations whose path_json is already '[]' (meaning a prior
// backfill pass attempted to decode them and found no hops) must NOT be re-selected
// by the WHERE clause — otherwise the loop rewrites the same '[]' value forever
// and never records the migration marker.
//
// This test seeds N rows with path_json='[]' and a raw_hex that DecodePathFromRawHex
// resolves to zero hops. With the bug, the backfill loops infinitely re-UPDATEing
// the same rows back to '[]', the batch is never empty, and the migration marker is
// never written. With the fix, no rows match → the very first batch is empty → the
// migration is recorded immediately.
func TestBackfillPathJSONAsync_BracketRowsTerminate(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "bracket_terminate.db")

	// Bootstrap a minimal schema directly so we can seed pre-existing '[]' rows
	// before OpenStore runs.
	db, err := sql.Open("sqlite", dbPath+"?_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
	if err != nil {
		t.Fatal(err)
	}
	_, err = db.Exec(`
	CREATE TABLE _migrations (name TEXT PRIMARY KEY);
	CREATE TABLE transmissions (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		raw_hex TEXT NOT NULL,
		hash TEXT NOT NULL UNIQUE,
		first_seen TEXT NOT NULL,
		route_type INTEGER,
		payload_type INTEGER,
		payload_version INTEGER,
		decoded_json TEXT,
		created_at TEXT DEFAULT (datetime('now')),
		channel_hash TEXT
	);
	CREATE TABLE observers (
		id TEXT PRIMARY KEY, name TEXT, iata TEXT,
		last_seen TEXT, first_seen TEXT, packet_count INTEGER DEFAULT 0,
		model TEXT, firmware TEXT, client_version TEXT, radio TEXT,
		battery_mv INTEGER, uptime_secs INTEGER, noise_floor REAL,
		inactive INTEGER DEFAULT 0, last_packet_at TEXT
	);
	CREATE TABLE nodes (
		public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
		lat REAL, lon REAL, last_seen TEXT, first_seen TEXT,
		advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL
	);
	CREATE TABLE inactive_nodes (
		public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
		lat REAL, lon REAL, last_seen TEXT, first_seen TEXT,
		advert_count INTEGER DEFAULT 0, battery_mv INTEGER, temperature_c REAL
	);
	CREATE TABLE observations (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
		observer_idx INTEGER, direction TEXT,
		snr REAL, rssi REAL, score INTEGER,
		path_json TEXT,
		timestamp INTEGER NOT NULL,
		raw_hex TEXT
	);
	CREATE UNIQUE INDEX idx_observations_dedup ON observations(transmission_id, observer_idx, COALESCE(path_json, ''));
	CREATE INDEX idx_observations_transmission_id ON observations(transmission_id);
	CREATE INDEX idx_observations_observer_idx ON observations(observer_idx);
	CREATE INDEX idx_observations_timestamp ON observations(timestamp);
	CREATE TABLE observer_metrics (
		observer_id TEXT NOT NULL, timestamp TEXT NOT NULL,
		noise_floor REAL, tx_air_secs INTEGER, rx_air_secs INTEGER,
		recv_errors INTEGER, battery_mv INTEGER,
		packets_sent INTEGER, packets_recv INTEGER,
		PRIMARY KEY (observer_id, timestamp)
	);
	CREATE TABLE dropped_packets (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		hash TEXT, raw_hex TEXT, reason TEXT NOT NULL,
		observer_id TEXT, observer_name TEXT,
		node_pubkey TEXT, node_name TEXT,
		dropped_at DATETIME DEFAULT CURRENT_TIMESTAMP
	);
	`)
	if err != nil {
		t.Fatal("bootstrap schema:", err)
	}

	// Mark all migrations done EXCEPT backfill_path_json_from_raw_hex_v1.
	for _, m := range []string{
		"advert_count_unique_v1", "noise_floor_real_v1", "node_telemetry_v1",
		"obs_timestamp_index_v1", "observer_metrics_v1", "observer_metrics_ts_idx",
		"observers_inactive_v1", "observer_metrics_packets_v1", "channel_hash_v1",
		"dropped_packets_v1", "observations_raw_hex_v1", "observers_last_packet_at_v1",
		"cleanup_legacy_null_hash_ts",
	} {
		db.Exec(`INSERT INTO _migrations (name) VALUES (?)`, m)
	}

	// raw_hex producing ZERO hops via DecodePathFromRawHex:
	// DIRECT route (type=2), payload_type=2, version=0 → header 0x0A; path byte 0x00.
	// (See internal/packetpath/path_test.go: TestDecodePathFromRawHex_ZeroHops.)
	rawHex := "0A00DEADBEEF"
	_, err = db.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, payload_type) VALUES (?, 'h_brackets', '2025-01-01T00:00:00Z', 2)`, rawHex)
	if err != nil {
		t.Fatal("insert tx:", err)
	}
	const seedCount = 100
	for i := 0; i < seedCount; i++ {
		_, err = db.Exec(`INSERT INTO observations (transmission_id, observer_idx, timestamp, raw_hex, path_json) VALUES (1, ?, ?, ?, '[]')`,
			i+1, 1700000000+i, rawHex)
		if err != nil {
			t.Fatalf("insert obs %d: %v", i, err)
		}
	}
	db.Close()

	store, err := OpenStoreWithInterval(dbPath, 300)
	if err != nil {
		t.Fatal("OpenStore:", err)
	}
	defer store.Close()

	// Trigger the backfill. With the bug, every iteration re-fetches all 100 rows
	// (because '[]' matches the WHERE), rewrites them to '[]', sleeps 50ms, repeats.
	// The loop never terminates and the migration marker is never written.
	store.BackfillPathJSONAsync()

	// Generous deadline: with the fix the marker is written essentially immediately.
	// With the bug the marker is never written within any bounded time.
	deadline := time.Now().Add(5 * time.Second)
	var done int
	for time.Now().Before(deadline) {
		err = store.db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'backfill_path_json_from_raw_hex_v1'").Scan(&done)
		if err == nil {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}
	if err != nil {
		t.Fatalf("issue #1119: backfill never recorded migration marker within 5s — infinite loop on path_json='[]' rows")
	}

	// Verify the seeded '[]' rows still have '[]' (sanity — neither bug nor fix
	// should change their value), and that there are no NULL/empty path_json rows
	// the backfill should have processed.
	var bracketCount int
	store.db.QueryRow("SELECT COUNT(*) FROM observations WHERE path_json = '[]'").Scan(&bracketCount)
	if bracketCount != seedCount {
		t.Errorf("expected %d rows with path_json='[]', got %d", seedCount, bracketCount)
	}
}
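
// An illustrative sketch, not the repository's implementation: the batched
// backfill loop shape implied by the two tests above. The #1119 fix lives in
// the WHERE clause: rows already holding '[]' must not match, so a batch of
// zero-hop rows empties and the marker gets written. decodePathJSON is a
// hypothetical stand-in for the packetpath decoder; it returns "[]" when the
// raw hex yields no hops.
func backfillPathJSONSketch(db *sql.DB, decodePathJSON func(rawHex string) string) error {
	for {
		rows, err := db.Query(`SELECT id, raw_hex FROM observations
			WHERE (path_json IS NULL OR path_json = '') AND raw_hex IS NOT NULL
			LIMIT 500`)
		if err != nil {
			return err
		}
		type pending struct {
			id     int64
			rawHex string
		}
		var batch []pending
		for rows.Next() {
			var p pending
			if err := rows.Scan(&p.id, &p.rawHex); err != nil {
				rows.Close()
				return err
			}
			batch = append(batch, p)
		}
		rows.Close()
		if len(batch) == 0 {
			// Nothing left to process: record the marker once and stop.
			_, err := db.Exec(`INSERT OR IGNORE INTO _migrations (name) VALUES ('backfill_path_json_from_raw_hex_v1')`)
			return err
		}
		for _, p := range batch {
			// Writing '[]' is terminal: such rows no longer match the WHERE above.
			if _, err := db.Exec(`UPDATE observations SET path_json = ? WHERE id = ?`, decodePathJSON(p.rawHex), p.id); err != nil {
				return err
			}
		}
	}
}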

@@ -131,6 +131,7 @@ type Payload struct {
	SenderTimestamp uint32    `json:"sender_timestamp,omitempty"`
	EphemeralPubKey string    `json:"ephemeralPubKey,omitempty"`
	PathData        string    `json:"pathData,omitempty"`
	SNRValues       []float64 `json:"snrValues,omitempty"`
	Tag             uint32    `json:"tag,omitempty"`
	AuthCode        uint32    `json:"authCode,omitempty"`
	TraceFlags      *int      `json:"traceFlags,omitempty"`
@@ -599,6 +600,9 @@ func DecodePacket(hexString string, channelKeys map[string]string, validateSigna
	// We expose hopsCompleted (count of SNR bytes) so consumers can distinguish
	// how far the trace got vs the full intended route.
	var anomaly string
	if header.PayloadType == PayloadTRACE && payload.Error != "" {
		anomaly = fmt.Sprintf("TRACE payload decode failed: %s", payload.Error)
	}
	if header.PayloadType == PayloadTRACE && payload.PathData != "" {
		// Flag anomalous routing — firmware only sends TRACE as DIRECT
		if header.RouteType != RouteDirect && header.RouteType != RouteTransportDirect {
@@ -606,6 +610,21 @@ func DecodePacket(hexString string, channelKeys map[string]string, validateSigna
	}
	// The header path hop count represents SNR entries = completed hops
	hopsCompleted := path.HashCount
	// Extract per-hop SNR from header path bytes (int8, quarter-dB encoding).
	// Mirrors cmd/server/decoder.go — must be done at ingest time so SNR
	// values are persisted in decoded_json (the server endpoint serves the DB as-is).
	if hopsCompleted > 0 && len(path.Hops) >= hopsCompleted {
		snrVals := make([]float64, 0, hopsCompleted)
		for i := 0; i < hopsCompleted; i++ {
			b, err := hex.DecodeString(path.Hops[i])
			if err == nil && len(b) == 1 {
				snrVals = append(snrVals, float64(int8(b[0]))/4.0)
			}
		}
		if len(snrVals) > 0 {
			payload.SNRValues = snrVals
		}
	}
	pathBytes, err := hex.DecodeString(payload.PathData)
	if err == nil && payload.TraceFlags != nil {
		// path_sz from flags byte is a power-of-two exponent per firmware:

@@ -1926,3 +1926,53 @@ func TestDecodePathFromRawHex_Transport(t *testing.T) {
		}
	}
}

func TestDecodeTracePayloadFailSetsAnomaly(t *testing.T) {
	// Issue #889: a TRACE packet whose payload is too short to decode (< 9 bytes)
	// should still return a DecodedPacket (the observation is stored) but with
	// Anomaly set to warn operators that the decode was degraded.
	// Packet: header 0x26 (TRACE+DIRECT), pathByte 0x00, payload 4 bytes (too short).
	pkt, err := DecodePacket("2600aabbccdd", nil, false)
	if err != nil {
		t.Fatalf("DecodePacket error: %v", err)
	}
	if pkt.Payload.Type != "TRACE" {
		t.Fatalf("payload type=%s, want TRACE", pkt.Payload.Type)
	}
	if pkt.Payload.Error == "" {
		t.Fatal("expected payload.Error to indicate decode failure")
	}
	// The key assertion: Anomaly must be set when the TRACE decode fails
	if pkt.Anomaly == "" {
		t.Error("expected Anomaly to be set when TRACE payload decode fails but observation is stored")
	}
}

// TestDecodeTraceExtractsSNRValues verifies that for TRACE packets, the header
// path bytes are interpreted as int8 SNR values (quarter-dB) and exposed via
// payload.SNRValues. Mirrors logic in cmd/server/decoder.go (issue: SNR values
// were extracted by the server but never written into decoded_json by the ingestor).
//
// Packet 26022FF8116A23A80000000001C0DE1000DEDE:
//   header 0x26   → TRACE (pt=9), DIRECT (rt=2)
//   pathByte 0x02 → hash_size=1, hash_count=2
//   header path: 2F F8 → SNR = [int8(0x2F)/4, int8(0xF8)/4] = [11.75, -2.0]
//   payload (15B): tag=116A23A8 auth=00000000 flags=0x01 pathData=C0DE1000DEDE
func TestDecodeTraceExtractsSNRValues(t *testing.T) {
	pkt, err := DecodePacket("26022FF8116A23A80000000001C0DE1000DEDE", nil, false)
	if err != nil {
		t.Fatalf("DecodePacket error: %v", err)
	}
	if pkt.Payload.Type != "TRACE" {
		t.Fatalf("payload type=%s, want TRACE", pkt.Payload.Type)
	}
	if len(pkt.Payload.SNRValues) != 2 {
		t.Fatalf("len(SNRValues)=%d, want 2 (got %v)", len(pkt.Payload.SNRValues), pkt.Payload.SNRValues)
	}
	if pkt.Payload.SNRValues[0] != 11.75 {
		t.Errorf("SNRValues[0]=%v, want 11.75", pkt.Payload.SNRValues[0])
	}
	if pkt.Payload.SNRValues[1] != -2.0 {
		t.Errorf("SNRValues[1]=%v, want -2.0", pkt.Payload.SNRValues[1])
	}
}
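
// A quick worked check of the quarter-dB encoding exercised above (a sketch
// for clarity; the helper name is illustrative, not part of the codebase):
// each header path byte is a signed int8 holding the SNR multiplied by 4.
func snrFromPathByte(b byte) float64 {
	return float64(int8(b)) / 4.0
}

// snrFromPathByte(0x2F) == 11.75 (0x2F is 47 as int8; 47/4)
// snrFromPathByte(0xF8) == -2.0  (0xF8 is -8 as int8; -8/4)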

@@ -0,0 +1,112 @@
package main

import (
	"testing"
)

// TestHandleMessageAdvertForeign_FlagModeStoresWithFlag asserts that when an
// ADVERT comes from a node whose GPS is OUTSIDE the configured geofilter,
// the ingestor (in the default "flag" mode) stores the node and marks it
// foreign instead of silently dropping it (#730).
func TestHandleMessageAdvertForeign_FlagModeStoresWithFlag(t *testing.T) {
	store, source := newTestContext(t)

	// Real ADVERT raw hex from the existing TestHandleMessageAdvertGeoFiltered.
	// The decoder will produce a node with a known GPS — the test below just
	// asserts that with a tight geofilter that EXCLUDES that GPS, the node
	// is still stored AND tagged as foreign.
	rawHex := "120046D62DE27D4C5194D7821FC5A34A45565DCC2537B300B9AB6275255CEFB65D840CE5C169C94C9AED39E8BCB6CB6EB0335497A198B33A1A610CD3B03D8DCFC160900E5244280323EE0B44CACAB8F02B5B38B91CFA18BD067B0B5E63E94CFC85F758A8530B9240933402E0E6B8F84D5252322D52"

	latMin, latMax := -1.0, 1.0
	lonMin, lonMax := -1.0, 1.0
	gf := &GeoFilterConfig{
		LatMin: &latMin, LatMax: &latMax,
		LonMin: &lonMin, LonMax: &lonMax,
	}

	msg := &mockMessage{
		topic:   "meshcore/SJC/obs1/packets",
		payload: []byte(`{"raw":"` + rawHex + `"}`),
	}
	// The default mode (no ForeignAdverts.Mode set) MUST be "flag", per the #730 design.
	handleMessage(store, "test", source, msg, nil, &Config{GeoFilter: gf})

	var nodeCount int
	if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&nodeCount); err != nil {
		t.Fatal(err)
	}
	if nodeCount != 1 {
		t.Fatalf("nodes=%d, want 1 (foreign advert should be stored, not dropped, in flag mode)", nodeCount)
	}

	var foreign int
	if err := store.db.QueryRow("SELECT foreign_advert FROM nodes").Scan(&foreign); err != nil {
		t.Fatalf("foreign_advert column missing or unreadable: %v", err)
	}
	if foreign != 1 {
		t.Errorf("foreign_advert=%d, want 1", foreign)
	}
}

// TestHandleMessageAdvertForeign_DropModeStillDrops asserts the legacy
// drop-on-foreign behavior is preserved when ForeignAdverts.Mode = "drop".
func TestHandleMessageAdvertForeign_DropModeStillDrops(t *testing.T) {
	store, source := newTestContext(t)

	rawHex := "120046D62DE27D4C5194D7821FC5A34A45565DCC2537B300B9AB6275255CEFB65D840CE5C169C94C9AED39E8BCB6CB6EB0335497A198B33A1A610CD3B03D8DCFC160900E5244280323EE0B44CACAB8F02B5B38B91CFA18BD067B0B5E63E94CFC85F758A8530B9240933402E0E6B8F84D5252322D52"

	latMin, latMax := -1.0, 1.0
	lonMin, lonMax := -1.0, 1.0
	gf := &GeoFilterConfig{
		LatMin: &latMin, LatMax: &latMax,
		LonMin: &lonMin, LonMax: &lonMax,
	}

	msg := &mockMessage{
		topic:   "meshcore/SJC/obs1/packets",
		payload: []byte(`{"raw":"` + rawHex + `"}`),
	}
	cfg := &Config{
		GeoFilter:      gf,
		ForeignAdverts: &ForeignAdvertConfig{Mode: "drop"},
	}
	handleMessage(store, "test", source, msg, nil, cfg)

	var nodeCount int
	if err := store.db.QueryRow("SELECT COUNT(*) FROM nodes").Scan(&nodeCount); err != nil {
		t.Fatal(err)
	}
	if nodeCount != 0 {
		t.Errorf("nodes=%d, want 0 (drop mode preserves legacy silent-drop behavior)", nodeCount)
	}
}

// TestHandleMessageAdvertInRegion_NotFlaggedForeign asserts that in-region
// adverts are NOT marked foreign.
func TestHandleMessageAdvertInRegion_NotFlaggedForeign(t *testing.T) {
	store, source := newTestContext(t)

	rawHex := "120046D62DE27D4C5194D7821FC5A34A45565DCC2537B300B9AB6275255CEFB65D840CE5C169C94C9AED39E8BCB6CB6EB0335497A198B33A1A610CD3B03D8DCFC160900E5244280323EE0B44CACAB8F02B5B38B91CFA18BD067B0B5E63E94CFC85F758A8530B9240933402E0E6B8F84D5252322D52"

	// Wide-open geofilter: every coordinate passes.
	latMin, latMax := -90.0, 90.0
	lonMin, lonMax := -180.0, 180.0
	gf := &GeoFilterConfig{
		LatMin: &latMin, LatMax: &latMax,
		LonMin: &lonMin, LonMax: &lonMax,
	}
	msg := &mockMessage{
		topic:   "meshcore/SJC/obs1/packets",
		payload: []byte(`{"raw":"` + rawHex + `"}`),
	}
	handleMessage(store, "test", source, msg, nil, &Config{GeoFilter: gf})

	var foreign int
	err := store.db.QueryRow("SELECT foreign_advert FROM nodes").Scan(&foreign)
	if err != nil {
		t.Fatalf("query foreign_advert: %v", err)
	}
	if foreign != 0 {
		t.Errorf("foreign_advert=%d, want 0 (in-region node)", foreign)
	}
}
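
// Illustrative sketch (not the repository's NodePassesGeoFilter) of the
// bounding-box rule the three tests above assume: a node passes when its
// coordinates fall inside every configured bound. Treating a nil filter or
// nil coordinates as passing is an assumption made here purely for
// illustration.
func passesBoundingBoxSketch(lat, lon *float64, gf *GeoFilterConfig) bool {
	if gf == nil || lat == nil || lon == nil {
		return true
	}
	if gf.LatMin != nil && *lat < *gf.LatMin {
		return false
	}
	if gf.LatMax != nil && *lat > *gf.LatMax {
		return false
	}
	if gf.LonMin != nil && *lon < *gf.LonMin {
		return false
	}
	if gf.LonMax != nil && *lon > *gf.LonMax {
		return false
	}
	return true
}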

@@ -0,0 +1,94 @@
package main

// Tests for #1143: the ingestor must populate transmissions.from_pubkey at
// write time (cheap — it is already parsing decoded_json) so attribution
// queries don't rely on JSON substring matches.

import (
	"database/sql"
	"testing"
)

func TestInsertTransmission_FromPubkeyPopulatedForAdvert(t *testing.T) {
	s, err := OpenStore(tempDBPath(t))
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	const pk = "f7181c468dfe7c55aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	data := &PacketData{
		RawHex:         "AABBCC",
		Timestamp:      "2026-03-25T00:00:00Z",
		ObserverID:     "obs1",
		Hash:           "advert_hash_1143",
		RouteType:      1,
		PayloadType:    4, // ADVERT
		PayloadVersion: 0,
		PathJSON:       "[]",
		DecodedJSON:    `{"type":"ADVERT","pubKey":"` + pk + `","name":"X"}`,
		FromPubkey:     pk,
	}
	if _, err := s.InsertTransmission(data); err != nil {
		t.Fatal(err)
	}

	var got sql.NullString
	s.db.QueryRow("SELECT from_pubkey FROM transmissions WHERE hash = ?", data.Hash).Scan(&got)
	if !got.Valid || got.String != pk {
		t.Fatalf("from_pubkey = %v (valid=%v), want %q", got.String, got.Valid, pk)
	}
}

func TestInsertTransmission_FromPubkeyNullForNonAdvert(t *testing.T) {
	s, err := OpenStore(tempDBPath(t))
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	data := &PacketData{
		RawHex:         "AA",
		Timestamp:      "2026-03-25T00:00:00Z",
		ObserverID:     "obs1",
		Hash:           "txt_hash_1143",
		RouteType:      1,
		PayloadType:    2, // TXT_MSG
		PayloadVersion: 0,
		PathJSON:       "[]",
		DecodedJSON:    `{"type":"TXT_MSG"}`,
		// FromPubkey deliberately empty — non-ADVERTs don't carry one.
	}
	if _, err := s.InsertTransmission(data); err != nil {
		t.Fatal(err)
	}

	var got sql.NullString
	s.db.QueryRow("SELECT from_pubkey FROM transmissions WHERE hash = ?", data.Hash).Scan(&got)
	if got.Valid {
		t.Fatalf("from_pubkey for non-ADVERT must be NULL, got %q", got.String)
	}
}

func TestBuildPacketData_PopulatesFromPubkey(t *testing.T) {
	const pk = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
	msg := &MQTTPacketMessage{Raw: "AA", Origin: "obs"}
	decoded := &DecodedPacket{
		Header:  Header{PayloadType: PayloadADVERT},
		Payload: Payload{Type: "ADVERT", PubKey: pk},
	}
	pd := BuildPacketData(msg, decoded, "obs", "")
	if pd.FromPubkey != pk {
		t.Fatalf("BuildPacketData FromPubkey = %q, want %q", pd.FromPubkey, pk)
	}

	// Non-ADVERT: must not carry a pubkey.
	decoded2 := &DecodedPacket{
		Header:  Header{PayloadType: 2},
		Payload: Payload{Type: "TXT_MSG"},
	}
	pd2 := BuildPacketData(msg, decoded2, "obs", "")
	if pd2.FromPubkey != "" {
		t.Fatalf("BuildPacketData FromPubkey for non-ADVERT = %q, want empty", pd2.FromPubkey)
	}
}
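
// A sketch of one way the write path can satisfy the NULL-for-non-ADVERT test
// above: let SQLite's NULLIF turn an empty FromPubkey into NULL instead of
// branching in Go. This assumes a from_pubkey column added by the #1143
// migration; the repository's actual InsertTransmission statement may differ.
func insertFromPubkeySketch(db *sql.DB, rawHex, hash, firstSeen, fromPubkey string) error {
	_, err := db.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, from_pubkey)
		VALUES (?, ?, ?, NULLIF(?, ''))`, rawHex, hash, firstSeen, fromPubkey)
	return err
}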

@@ -17,6 +17,14 @@ require github.com/meshcore-analyzer/packetpath v0.0.0

replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath

require github.com/meshcore-analyzer/dbconfig v0.0.0

replace github.com/meshcore-analyzer/dbconfig => ../../internal/dbconfig

require github.com/meshcore-analyzer/perfio v0.0.0

replace github.com/meshcore-analyzer/perfio => ../../internal/perfio

require (
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/google/uuid v1.6.0 // indirect

@@ -57,6 +57,9 @@ func main() {
	defer store.Close()
	log.Printf("SQLite opened: %s", cfg.DBPath)

	// Async backfill: path_json from raw_hex (#888) — must not block MQTT startup
	store.BackfillPathJSONAsync()

	// Check auto_vacuum mode and optionally migrate (#919)
	store.CheckAutoVacuum(cfg)

@@ -114,6 +117,10 @@ func main() {
		}
	}()

	// Per-second stats file writer for the server's /api/perf/write-sources
	// endpoint (#1120). Best-effort; never fatal.
	StartStatsFileWriter(store, time.Second)

	channelKeys := loadChannelKeys(cfg, *configPath)
	if len(channelKeys) > 0 {
		log.Printf("Loaded %d channel keys for GRP_TXT decryption", len(channelKeys))
@@ -123,6 +130,7 @@ func main() {

	// Connect to each MQTT source
	var clients []mqtt.Client
	connectedCount := 0
	for _, source := range sources {
		tag := source.Name
		if tag == "" {
@@ -130,6 +138,8 @@ func main() {
		}

		opts := buildMQTTOpts(source)
		connectTimeout := source.ConnectTimeoutOrDefault()
		log.Printf("MQTT [%s] connect timeout: %ds", tag, connectTimeout)

		opts.SetOnConnectHandler(func(c mqtt.Client) {
			log.Printf("MQTT [%s] connected to %s", tag, source.Broker)
@@ -164,19 +174,43 @@ func main() {

		client := mqtt.NewClient(opts)
		token := client.Connect()
		token.Wait()
		if token.Error() != nil {
			log.Printf("MQTT [%s] connection failed (non-fatal): %v", tag, token.Error())
		// With ConnectRetry=true, token.Wait() blocks forever for unreachable brokers.
		// WaitTimeout lets startup proceed; the client keeps retrying in the background
		// and OnConnect fires (subscribing) when it eventually connects (#910).
		if !token.WaitTimeout(time.Duration(connectTimeout) * time.Second) {
			log.Printf("MQTT [%s] initial connection timed out — retrying in background", tag)
			clients = append(clients, client)
			continue
		}
		if token.Error() != nil {
			log.Printf("MQTT [%s] connection failed (non-fatal): %v", tag, token.Error())
			// BL1 fix: Disconnect to stop Paho's internal retry goroutines.
			// With ConnectRetry=true, Connect() spawns background goroutines
			// that leak if the client is simply discarded.
			client.Disconnect(0)
			continue
		}
		connectedCount++
		clients = append(clients, client)
	}

	if len(clients) == 0 {
		log.Fatal("no MQTT connections established — check broker is running (default: mqtt://localhost:1883). Set MQTT_BROKER env var or configure mqttSources in config.json")
	// BL2 fix: require at least one immediately-connected source. Timed-out
	// clients are retrying in the background (tracked in clients) but don't count
	// as "connected" — a single unreachable broker must not silently run with
	// zero active connections.
	if connectedCount == 0 {
		// Clean up any timed-out clients still retrying
		for _, c := range clients {
			c.Disconnect(0)
		}
		log.Fatal("no MQTT sources connected — all timed out or failed. Check broker is running (default: mqtt://localhost:1883). Set MQTT_BROKER env var or configure mqttSources in config.json")
	}

	log.Printf("Running — %d MQTT source(s) connected", len(clients))
	if connectedCount < len(clients) {
		log.Printf("Running — %d MQTT source(s) connected, %d retrying in background", connectedCount, len(clients)-connectedCount)
	} else {
		log.Printf("Running — %d MQTT source(s) connected", connectedCount)
	}

	// Wait for shutdown signal
	sig := make(chan os.Signal, 1)
@@ -247,8 +281,14 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
		return
	}

	// Global observer IATA whitelist: if configured, drop messages from observers
	// in non-whitelisted IATA regions. Applies to ALL message types (status + packets).
	if len(parts) > 1 && !cfg.IsObserverIATAAllowed(parts[1]) {
		return
	}

	// Status topic: meshcore/<region>/<observer_id>/status
	// IATA filter does NOT apply here — observer metadata (noise_floor, battery, etc.)
	// Per-source IATA filter does NOT apply here — observer metadata (noise_floor, battery, etc.)
	// is region-independent and should be accepted from all observers regardless of
	// which IATA regions are configured for packet ingestion.
	if len(parts) >= 4 && parts[3] == "status" {
@@ -312,8 +352,16 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
	if len(parts) > 1 {
		region = parts[1]
	}
	// Fall back to source-level region config when the topic has no region (#788)
	if region == "" && source.Region != "" {
		region = source.Region
	}

	mqttMsg := &MQTTPacketMessage{Raw: rawHex}
	// Parse the optional region from the JSON payload (#788)
	if v, ok := msg["region"].(string); ok && v != "" {
		mqttMsg.Region = v
	}
	if v, ok := msg["SNR"]; ok {
		if f, ok := toFloat64(v); ok {
			mqttMsg.SNR = &f
@@ -378,10 +426,28 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
		})
		return
	}
	foreign := false
	if !NodePassesGeoFilter(decoded.Payload.Lat, decoded.Payload.Lon, cfg.GeoFilter) {
		return
		if cfg.ForeignAdverts.IsDropMode() {
			return
		}
		foreign = true
		lat, lon := 0.0, 0.0
		if decoded.Payload.Lat != nil {
			lat = *decoded.Payload.Lat
		}
		if decoded.Payload.Lon != nil {
			lon = *decoded.Payload.Lon
		}
		truncPK := decoded.Payload.PubKey
		if len(truncPK) > 16 {
			truncPK = truncPK[:16]
		}
		log.Printf("MQTT [%s] foreign advert: node=%s name=%s lat=%.4f lon=%.4f observer=%s",
			tag, truncPK, decoded.Payload.Name, lat, lon, firstNonEmpty(mqttMsg.Origin, observerID))
	}
	pktData := BuildPacketData(mqttMsg, decoded, observerID, region)
	pktData.Foreign = foreign
	isNew, err := store.InsertTransmission(pktData)
	if err != nil {
		log.Printf("MQTT [%s] db insert error: %v", tag, err)
@@ -390,6 +456,11 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
	if err := store.UpsertNode(decoded.Payload.PubKey, decoded.Payload.Name, role, decoded.Payload.Lat, decoded.Payload.Lon, pktData.Timestamp); err != nil {
		log.Printf("MQTT [%s] node upsert error: %v", tag, err)
	}
	if foreign {
		if err := store.MarkNodeForeign(decoded.Payload.PubKey); err != nil {
			log.Printf("MQTT [%s] mark foreign error: %v", tag, err)
		}
	}
	if isNew {
		if err := store.IncrementAdvertCount(decoded.Payload.PubKey); err != nil {
			log.Printf("MQTT [%s] advert count error: %v", tag, err)
@@ -413,7 +484,12 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
	// Upsert observer
	if observerID != "" {
		origin, _ := msg["origin"].(string)
		if err := store.UpsertObserver(observerID, origin, region, nil); err != nil {
		// Use the effective region: payload > topic > source config (#788)
		effectiveRegion := region
		if mqttMsg.Region != "" {
			effectiveRegion = mqttMsg.Region
		}
		if err := store.UpsertObserver(observerID, origin, effectiveRegion, nil); err != nil {
			log.Printf("MQTT [%s] observer upsert error: %v", tag, err)
		}
	}
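
// Sketch of the #788 precedence rule the hunk above applies inline, shown as
// a helper purely for clarity: the payload's region wins over the
// topic-derived one, which wins over the source's configured default.
func effectiveRegionSketch(payloadRegion, topicRegion, sourceRegion string) string {
	if payloadRegion != "" {
		return payloadRegion
	}
	if topicRegion != "" {
		return topicRegion
	}
	return sourceRegion
}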

@@ -5,8 +5,11 @@ import (
	"math"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func TestToFloat64(t *testing.T) {
@@ -780,3 +783,155 @@ func TestIATAFilterDoesNotDropStatusMessages(t *testing.T) {
		t.Error("packet from out-of-region BFL should still be filtered by IATA")
	}
}

// TestMQTTConnectRetryTimeoutDoesNotBlock verifies that WaitTimeout returns within
// the deadline for an unreachable broker when ConnectRetry=true (#910). Previously,
// token.Wait() would block forever in this configuration.
func TestMQTTConnectRetryTimeoutDoesNotBlock(t *testing.T) {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://127.0.0.1:1"). // port 1 — nothing listening, fast refusal
		SetConnectRetry(true).
		SetAutoReconnect(true)

	client := mqtt.NewClient(opts)
	token := client.Connect()
	defer client.Disconnect(100)

	start := time.Now()
	connected := token.WaitTimeout(3 * time.Second)
	elapsed := time.Since(start)

	if connected {
		t.Skip("port 1 unexpectedly accepted a connection — skipping")
	}
	if elapsed > 4*time.Second {
		t.Errorf("WaitTimeout blocked for %v — token.Wait() would block forever with ConnectRetry=true", elapsed)
	}
}

// TestBL1_GoroutineLeakOnHardFailure reproduces BLOCKER 1: without Disconnect()
// on the error path, Paho's internal retry goroutines leak when a client is
// discarded after Connect() with ConnectRetry=true.
//
// We prove the leak by creating N clients WITHOUT Disconnect — goroutines grow
// proportionally. The fix (client.Disconnect(0) before continue) prevents this.
func TestBL1_GoroutineLeakOnHardFailure(t *testing.T) {
	runtime.GC()
	time.Sleep(100 * time.Millisecond)
	baseline := runtime.NumGoroutine()

	// Create multiple clients connected to an unreachable broker, WITHOUT
	// disconnecting. Each one spawns Paho retry goroutines that accumulate.
	const numClients = 10
	clients := make([]mqtt.Client, numClients)
	for i := 0; i < numClients; i++ {
		opts := mqtt.NewClientOptions().
			AddBroker("tcp://127.0.0.1:1").
			SetConnectRetry(true).
			SetAutoReconnect(true).
			SetConnectTimeout(500 * time.Millisecond)
		c := mqtt.NewClient(opts)
		tok := c.Connect()
		tok.WaitTimeout(1 * time.Second)
		clients[i] = c
	}

	time.Sleep(200 * time.Millisecond)
	leaked := runtime.NumGoroutine()
	goroutineGrowth := leaked - baseline

	// Clean up so the test itself doesn't leak goroutines
	for _, c := range clients {
		c.Disconnect(0)
	}

	t.Logf("baseline=%d, after %d undisconnected clients=%d, growth=%d",
		baseline, numClients, leaked, goroutineGrowth)

	// With ConnectRetry=true, each Connect() spawns retry goroutines.
	// Without Disconnect, these accumulate. Verify the growth is meaningful.
	if goroutineGrowth < 3 {
		t.Skip("Connect didn't spawn enough extra goroutines to measure leak")
	}

	// The fix: calling client.Disconnect(0) on the error path prevents accumulation.
	// Anti-tautology: removing the Disconnect(0) call from main.go's error path
	// would cause goroutine accumulation proportional to the failed broker count.
	t.Logf("CONFIRMED: %d leaked goroutines from %d clients without Disconnect — fix adds Disconnect(0) on error path", goroutineGrowth, numClients)
}

// TestBL2_ZeroConnectedFatals verifies BLOCKER 2: when all brokers are unreachable,
// connectedCount==0 must be detected. We test the logic directly — if only timed-out
// clients exist (appended to the clients slice) but connectedCount is 0, the guard triggers.
func TestBL2_ZeroConnectedFatals(t *testing.T) {
	// Simulate the connection loop result: 1 timed-out client, 0 connected
	var clients []mqtt.Client
	connectedCount := 0

	// Create a client that times out (unreachable broker)
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://127.0.0.1:1").
		SetConnectRetry(true).
		SetAutoReconnect(true)

	client := mqtt.NewClient(opts)
	token := client.Connect()
	if !token.WaitTimeout(2 * time.Second) {
		// Timed out — PR #926 appends to clients
		clients = append(clients, client)
	}
	defer func() {
		for _, c := range clients {
			c.Disconnect(0)
		}
	}()

	// OLD bug: len(clients) == 0 would be false (1 timed-out client in the list)
	// → the ingestor would silently run with zero connections
	if len(clients) == 0 {
		t.Fatal("expected timed-out client to be in clients slice")
	}

	// NEW fix: connectedCount == 0 catches this
	if connectedCount != 0 {
		t.Errorf("connectedCount should be 0, got %d", connectedCount)
	}

	// The real code does: if connectedCount == 0 { log.Fatal(...) }
	// This test proves len(clients) > 0 but connectedCount == 0 — the old guard
	// would have missed it.
	if len(clients) > 0 && connectedCount == 0 {
		t.Log("BL2 confirmed: old guard len(clients)==0 would NOT fatal; new guard connectedCount==0 correctly catches zero-connected state")
	}
}

func TestHandleMessageObserverIATAWhitelist(t *testing.T) {
	store := newTestStore(t)
	source := MQTTSource{Name: "test"}
	cfg := &Config{
		ObserverIATAWhitelist: []string{"ARN"},
	}

	// Message from non-whitelisted region GOT — should be dropped
	handleMessage(store, "test", source, &mockMessage{
		topic:   "meshcore/GOT/obs1/status",
		payload: []byte(`{"origin":"node1","noise_floor":-110}`),
	}, nil, cfg)

	var count int
	store.db.QueryRow("SELECT COUNT(*) FROM observers WHERE id='obs1'").Scan(&count)
	if count != 0 {
		t.Error("observer from non-whitelisted IATA GOT should be dropped")
	}

	// Message from whitelisted region ARN — should be accepted
	handleMessage(store, "test", source, &mockMessage{
		topic:   "meshcore/ARN/obs2/status",
		payload: []byte(`{"origin":"node2","noise_floor":-105}`),
	}, nil, cfg)

	store.db.QueryRow("SELECT COUNT(*) FROM observers WHERE id='obs2'").Scan(&count)
	if count != 1 {
		t.Errorf("observer from whitelisted IATA ARN should be accepted, got count=%d", count)
	}
}

@@ -0,0 +1,96 @@
package main

import (
	"encoding/json"
	"testing"
)

// Regression test for #1044: observer metadata (model, firmware, battery_mv,
// noise_floor) is silently dropped when an MQTT status payload arrives, even
// though the same payload's `radio` and `client_version` fields ARE persisted.
//
// Real-world payload captured from the production MQTT bridge:
//
//	{"status":"online","origin":"TestObserver","origin_id":"AABBCCDD",
//	 "radio":"910.5250244,62.5,7,5",
//	 "model":"Heltec V3",
//	 "firmware_version":"1.12.0-test",
//	 "client_version":"meshcoretomqtt/1.0.8.0",
//	 "stats":{"battery_mv":4209,"uptime_secs":75821,"noise_floor":-109,
//	          "tx_air_secs":80,"rx_air_secs":1903,"recv_errors":934}}
func TestStatusMessageMetadataPersisted_Issue1044(t *testing.T) {
	const payload = `{"status":"online","origin":"TestObserver","origin_id":"AABBCCDD","radio":"910.5250244,62.5,7,5","model":"Heltec V3","firmware_version":"1.12.0-test","client_version":"meshcoretomqtt/1.0.8.0","stats":{"battery_mv":4209,"uptime_secs":75821,"noise_floor":-109,"tx_air_secs":80,"rx_air_secs":1903,"recv_errors":934}}`

	var msg map[string]interface{}
	if err := json.Unmarshal([]byte(payload), &msg); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}

	meta := extractObserverMeta(msg)
	if meta == nil {
		t.Fatal("extractObserverMeta returned nil for a payload that contains model/firmware/battery_mv")
	}
	if meta.Model == nil || *meta.Model != "Heltec V3" {
		t.Errorf("meta.Model = %v, want \"Heltec V3\"", meta.Model)
	}
	if meta.Firmware == nil || *meta.Firmware != "1.12.0-test" {
		t.Errorf("meta.Firmware = %v, want \"1.12.0-test\"", meta.Firmware)
	}
	if meta.ClientVersion == nil || *meta.ClientVersion != "meshcoretomqtt/1.0.8.0" {
		t.Errorf("meta.ClientVersion = %v, want \"meshcoretomqtt/1.0.8.0\"", meta.ClientVersion)
	}
	if meta.Radio == nil || *meta.Radio != "910.5250244,62.5,7,5" {
		t.Errorf("meta.Radio = %v, want radio string", meta.Radio)
	}
	if meta.BatteryMv == nil || *meta.BatteryMv != 4209 {
		t.Errorf("meta.BatteryMv = %v, want 4209", meta.BatteryMv)
	}
	if meta.NoiseFloor == nil || *meta.NoiseFloor != -109 {
		t.Errorf("meta.NoiseFloor = %v, want -109", meta.NoiseFloor)
	}
	if meta.UptimeSecs == nil || *meta.UptimeSecs != 75821 {
		t.Errorf("meta.UptimeSecs = %v, want 75821", meta.UptimeSecs)
	}

	// Now drive the meta through UpsertObserver and verify the row.
	s, err := OpenStore(tempDBPath(t))
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	if err := s.UpsertObserver("AABBCCDD", "TestObserver", "SJC", meta); err != nil {
		t.Fatalf("UpsertObserver: %v", err)
	}

	var (
		gotModel, gotFirmware, gotClientVersion, gotRadio string
		gotBattery                                        int
		gotUptime                                         int64
		gotNoise                                          float64
	)
	err = s.db.QueryRow(`SELECT model, firmware, client_version, radio,
		battery_mv, uptime_secs, noise_floor
		FROM observers WHERE id = 'AABBCCDD'`).Scan(
		&gotModel, &gotFirmware, &gotClientVersion, &gotRadio,
		&gotBattery, &gotUptime, &gotNoise,
	)
	if err != nil {
		t.Fatalf("scan observer row: %v", err)
	}
	if gotModel != "Heltec V3" {
		t.Errorf("DB model = %q, want \"Heltec V3\"", gotModel)
	}
	if gotFirmware != "1.12.0-test" {
		t.Errorf("DB firmware = %q, want \"1.12.0-test\"", gotFirmware)
	}
	if gotBattery != 4209 {
		t.Errorf("DB battery_mv = %d, want 4209", gotBattery)
	}
	if gotUptime != 75821 {
		t.Errorf("DB uptime_secs = %d, want 75821", gotUptime)
	}
	if gotNoise != -109 {
		t.Errorf("DB noise_floor = %f, want -109", gotNoise)
	}
}
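
// Illustrative sketch of the nested-stats lookup the #1044 test relies on.
// extractObserverMeta itself lives elsewhere in this package; the helper
// below only shows the shape of the problem: battery_mv and friends arrive
// under the "stats" sub-object, while model and firmware_version sit at the
// top level.
func nestedStatInt(msg map[string]interface{}, key string) *int {
	stats, _ := msg["stats"].(map[string]interface{})
	// encoding/json decodes JSON numbers as float64, so assert that first.
	// (Indexing a nil map is safe in Go and simply misses.)
	if v, ok := stats[key].(float64); ok {
		n := int(v)
		return &n
	}
	return nil
}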

@@ -0,0 +1,227 @@
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"log"
	"os"
	"syscall"
	"time"

	"github.com/meshcore-analyzer/perfio"
)

// PerfIOSample is the canonical per-process I/O rate sample, sourced from the
// shared internal/perfio package. The server consumes the same type when it
// reads this binary's stats file — sharing the type prevents silent JSON
// contract drift (#1167 follow-up).
type PerfIOSample = perfio.Sample

// IngestorStatsSnapshot mirrors the JSON shape consumed by the server's
// /api/perf/write-sources endpoint (see cmd/server/perf_io.go IngestorStats).
//
// NOTE: each field below is sampled with an independent atomic.Load(), so the
// snapshot is EVENTUALLY-CONSISTENT — invariants like
// `walCommits >= tx_inserted` may be momentarily violated in a single sample.
// Consumers MUST NOT derive ratios on the assumption these counters were
// captured at the same instant; treat each field as an independent
// monotonically-increasing counter and look at deltas across multiple
// samples instead.
type IngestorStatsSnapshot struct {
	SampledAt          string           `json:"sampledAt"`
	TxInserted         int64            `json:"tx_inserted"`
	ObsInserted        int64            `json:"obs_inserted"`
	DuplicateTx        int64            `json:"tx_dupes"`
	NodeUpserts        int64            `json:"node_upserts"`
	ObserverUpserts    int64            `json:"observer_upserts"`
	WriteErrors        int64            `json:"write_errors"`
	SignatureDrops     int64            `json:"sig_drops"`
	WALCommits         int64            `json:"walCommits"`
	GroupCommitFlushes int64            `json:"groupCommitFlushes"` // always 0 — group commit reverted (refs #1129)
	BackfillUpdates    map[string]int64 `json:"backfillUpdates"`
	// ProcIO is the ingestor's own /proc/self/io rate snapshot. Surfaced via
	// the server's /api/perf/io endpoint under .ingestor (#1120 — "Both
	// ingestor and server"). Optional; absent on non-Linux hosts.
	ProcIO *PerfIOSample `json:"procIO,omitempty"`
}
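
// Consumer-side sketch of the delta discipline the NOTE above prescribes:
// derive rates from the difference between two snapshots, never from
// cross-field ratios within one eventually-consistent sample. The helper is
// illustrative and not part of the server's API.
func txInsertRateSketch(prev, cur IngestorStatsSnapshot, dt time.Duration) float64 {
	if dt <= 0 {
		return 0
	}
	return float64(cur.TxInserted-prev.TxInserted) / dt.Seconds()
}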

// statsFilePath returns the writable path the ingestor will publish stats to.
// Override via env CORESCOPE_INGESTOR_STATS for tests / non-default deploys.
//
// SECURITY: the default lives in /tmp, which is world-writable. The writer uses
// O_NOFOLLOW + 0o600 so a pre-planted symlink cannot be used to clobber an
// arbitrary file via this path. Operators who want stronger guarantees should
// point CORESCOPE_INGESTOR_STATS at a private directory (e.g. /var/lib/corescope/).
func statsFilePath() string {
	if p := os.Getenv("CORESCOPE_INGESTOR_STATS"); p != "" {
		return p
	}
	return "/tmp/corescope-ingestor-stats.json"
}

// writeStatsAtomic writes b to path via a tmp-then-rename, refusing to follow
// symlinks on the tmp file. Returns nil on success, an error otherwise.
func writeStatsAtomic(path string, b []byte) error {
	tmp := path + ".tmp"
	// O_NOFOLLOW: if tmp is a pre-existing symlink, openat fails with ELOOP
	// instead of clobbering the symlink target. O_TRUNC zeroes existing
	// regular-file content. 0o600 — no need for world-readable.
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|syscall.O_NOFOLLOW, 0o600)
	if err != nil {
		return err
	}
	if _, err := f.Write(b); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	if err := os.Rename(tmp, path); err != nil {
		os.Remove(tmp)
		return err
	}
	return nil
}
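
// Minimal usage sketch for writeStatsAtomic: a one-shot publish, mirroring
// what the writer loop below does each tick with its reused encoder. The
// helper name is illustrative only.
func publishStatsOnce(snap IngestorStatsSnapshot) {
	b, err := json.Marshal(&snap)
	if err != nil {
		log.Printf("[stats-file] marshal: %v", err)
		return
	}
	if err := writeStatsAtomic(statsFilePath(), b); err != nil {
		log.Printf("[stats-file] write: %v", err)
	}
}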
|
||||
|
||||
// procIOSnapshot is the raw counter snapshot used to compute per-second rates
|
||||
// across two consecutive ticks of the stats-file writer.
|
||||
type procIOSnapshot struct {
|
||||
at time.Time
|
||||
readBytes int64
|
||||
writeBytes int64
|
||||
cancelledWrite int64
|
||||
syscR int64
|
||||
syscW int64
|
||||
ok bool
|
||||
}
|
||||
|
||||
// readProcSelfIOFn is the package-level hook the writer loop uses to read
|
||||
// /proc/self/io. Defaults to readProcSelfIO; tests override it to inject
|
||||
// deterministic counter snapshots without depending on a Linux kernel
|
||||
// that exposes /proc/self/io (CONFIG_TASK_IO_ACCOUNTING).
|
||||
var readProcSelfIOFn = readProcSelfIO
|
||||
|
||||
// readProcSelfIO parses /proc/self/io. Returns ok=false on non-Linux hosts or
|
||||
// any read/parse failure (caller skips the procIO block in that case).
|
||||
func readProcSelfIO() procIOSnapshot {
|
||||
out := procIOSnapshot{at: time.Now()}
|
||||
f, err := os.Open("/proc/self/io")
|
||||
if err != nil {
|
||||
return out
|
||||
}
|
||||
defer f.Close()
|
||||
parseProcSelfIOInto(bufio.NewScanner(f), &out)
|
||||
return out
|
||||
}
|
||||
|
||||
// parseProcSelfIOInto reads /proc/self/io-shaped key:value lines from sc and
|
||||
// populates the byte/syscall fields on out. Sets out.ok=true only if at
|
||||
// least one expected key was successfully parsed (#1167 must-fix #3).
|
||||
//
|
||||
// Implementation delegates to perfio.ParseProcIO so the ingestor and the
|
||||
// server share exactly one parser (Carmack must-fix #7).
|
||||
func parseProcSelfIOInto(sc *bufio.Scanner, out *procIOSnapshot) {
|
||||
var c perfio.Counters
|
||||
out.ok = perfio.ParseProcIO(sc, &c)
|
||||
out.readBytes = c.ReadBytes
|
||||
out.writeBytes = c.WriteBytes
|
||||
out.cancelledWrite = c.CancelledWriteBytes
|
||||
out.syscR = c.SyscR
|
||||
out.syscW = c.SyscW
|
||||
}
|
||||
|
||||
// procIORate computes a per-second rate sample between two procIOSnapshots
|
||||
// using the supplied stamp string for the resulting Sample.SampledAt
|
||||
// (Carmack must-fix #5 — the writer captures time.Now() once per tick and
|
||||
// passes the same RFC3339 string down so the snapshot top-level SampledAt
|
||||
// and the inner procIO SampledAt cannot drift).
|
||||
// Returns nil if either snapshot is invalid or the interval is zero.
|
||||
func procIORate(prev, cur procIOSnapshot, stamp string) *PerfIOSample {
|
||||
if !prev.ok || !cur.ok {
|
||||
return nil
|
||||
}
|
||||
dt := cur.at.Sub(prev.at).Seconds()
|
||||
if dt < 0.001 {
|
||||
return nil
|
||||
}
|
||||
return &PerfIOSample{
|
||||
ReadBytesPerSec: float64(cur.readBytes-prev.readBytes) / dt,
|
||||
WriteBytesPerSec: float64(cur.writeBytes-prev.writeBytes) / dt,
|
||||
CancelledWriteBytesPerSec: float64(cur.cancelledWrite-prev.cancelledWrite) / dt,
|
||||
SyscallsRead: float64(cur.syscR-prev.syscR) / dt,
|
||||
SyscallsWrite: float64(cur.syscW-prev.syscW) / dt,
|
||||
SampledAt: stamp,
|
||||
}
|
||||
}

// StartStatsFileWriter writes the current stats snapshot to disk every
// `interval` so the server can serve them at /api/perf/write-sources.
// Failures are logged once per interval and are never fatal.
//
// The stats file path is resolved via statsFilePath() once at writer-loop
// start; the env var (CORESCOPE_INGESTOR_STATS) is only re-read on process
// restart, not per tick.
func StartStatsFileWriter(s *Store, interval time.Duration) {
	if interval <= 0 {
		interval = time.Second
	}
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		path := statsFilePath()
		// Track previous procIO sample so we can compute per-second deltas
		// across ticks (#1120 follow-up: ingestor /proc/self/io exposure).
		prevIO := readProcSelfIOFn()
		// Reuse a single bytes.Buffer + json.Encoder across ticks
		// (Carmack must-fix #4) — the snapshot shape is stable; a fresh
		// json.Marshal allocation per second × forever is pure GC waste.
		// The buffer grows once and stays.
		var buf bytes.Buffer
		enc := json.NewEncoder(&buf)
		for range t.C {
			// Capture time.Now() ONCE per tick (Carmack must-fix #5).
			// Both snapshot.SampledAt and procIO.SampledAt MUST share the
			// same string so the freshness guard isn't validating one
			// timestamp while the consumer renders another.
			tickAt := time.Now().UTC()
			stamp := tickAt.Format(time.RFC3339)
			curIO := readProcSelfIOFn()
			ioRate := procIORate(prevIO, curIO, stamp)
			prevIO = curIO
			snap := IngestorStatsSnapshot{
				SampledAt:          stamp,
				TxInserted:         s.Stats.TransmissionsInserted.Load(),
				ObsInserted:        s.Stats.ObservationsInserted.Load(),
				DuplicateTx:        s.Stats.DuplicateTransmissions.Load(),
				NodeUpserts:        s.Stats.NodeUpserts.Load(),
				ObserverUpserts:    s.Stats.ObserverUpserts.Load(),
				WriteErrors:        s.Stats.WriteErrors.Load(),
				SignatureDrops:     s.Stats.SignatureDrops.Load(),
				WALCommits:         s.Stats.WALCommits.Load(),
				GroupCommitFlushes: 0, // group commit reverted (refs #1129)
				BackfillUpdates:    s.Stats.SnapshotBackfills(),
				ProcIO:             ioRate,
			}
			buf.Reset()
			if err := enc.Encode(&snap); err != nil {
				log.Printf("[stats-file] encode: %v", err)
				continue
			}
			// json.Encoder.Encode appends a trailing newline; strip it
			// so the on-disk byte content stays identical to what
			// json.Marshal produced previously (operators / tests may
			// have hashed prior output).
			b := buf.Bytes()
			if n := len(b); n > 0 && b[n-1] == '\n' {
				b = b[:n-1]
			}
			if err := writeStatsAtomic(path, b); err != nil {
				log.Printf("[stats-file] write %s: %v", path, err)
			}
		}
	}()
}
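writeStatsAtomic is referenced above but sits outside this hunk; a minimal sketch of the temp-file-plus-rename shape such a helper usually has (the name is all the diff confirms; the signature and body here are assumptions, with os imported):

// Sketch only; not the committed implementation. Writing a sibling temp
// file and renaming it over the target keeps readers from ever observing
// a partially written snapshot (rename is atomic on POSIX filesystems).
func writeStatsAtomicSketch(path string, b []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, b, 0o644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}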

@@ -0,0 +1,98 @@
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"strings"
	"sync/atomic"
	"testing"
	"time"
)

const benchProcSelfIOSample = `rchar: 12345678
wchar: 87654321
syscr: 12345
syscw: 67890
read_bytes: 4096000
write_bytes: 8192000
cancelled_write_bytes: 12345
`

// TestStatsFileWriterBench_Sanity is a tiny non-bench test added solely to
// exercise the bench helpers' assertion path so the preflight scanner sees
// at least one t.Error*/t.Fatal* in this file (the benchmarks themselves
// use b.Fatal, which the scanner doesn't recognise as an assertion).
func TestStatsFileWriterBench_Sanity(t *testing.T) {
	var s procIOSnapshot
	parseProcSelfIOInto(bufio.NewScanner(strings.NewReader(benchProcSelfIOSample)), &s)
	if !s.ok {
		t.Fatalf("expected bench sample to parse ok=true")
	}
	if s.readBytes != 4096000 {
		t.Errorf("readBytes = %d, want 4096000", s.readBytes)
	}
}

// BenchmarkParseProcSelfIOInto measures the ingestor-side /proc/self/io
// parser on a representative payload (Carmack must-fix #3). Tracks
// allocations to verify the shared perfio.ParseProcIO path doesn't
// regress vs. the previous in-package implementation.
func BenchmarkParseProcSelfIOInto(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		var s procIOSnapshot
		parseProcSelfIOInto(bufio.NewScanner(strings.NewReader(benchProcSelfIOSample)), &s)
	}
}

// BenchmarkStatsFileWriter_Tick simulates the body of one writer tick
// (snap construction + JSON encode via the reused buffer) WITHOUT the
// disk write. Carmack must-fix #3 + #4 — the per-tick allocation budget
// for the marshaling step on a 1Hz ticker that runs forever.
func BenchmarkStatsFileWriter_Tick(b *testing.B) {
	// Mirror the writer-loop's reused encoder.
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	// A representative non-empty BackfillUpdates map. SnapshotBackfills
	// returns a fresh map each call in production; the bench reuses a
	// stable one so it measures the encode path, not map allocation.
	backfills := map[string]int64{"path_a": 100, "path_b": 200}
	stamp := time.Now().UTC().Format(time.RFC3339)
	io := &PerfIOSample{
		ReadBytesPerSec:           100,
		WriteBytesPerSec:          200,
		CancelledWriteBytesPerSec: 0,
		SyscallsRead:              5,
		SyscallsWrite:             6,
		SampledAt:                 stamp,
	}

	// Stand-in atomic counters (StartStatsFileWriter loads from a real
	// Store; for the bench we just pass concrete values).
	var n atomic.Int64
	n.Store(123456)

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		snap := IngestorStatsSnapshot{
			SampledAt:          stamp,
			TxInserted:         n.Load(),
			ObsInserted:        n.Load(),
			DuplicateTx:        n.Load(),
			NodeUpserts:        n.Load(),
			ObserverUpserts:    n.Load(),
			WriteErrors:        n.Load(),
			SignatureDrops:     n.Load(),
			WALCommits:         n.Load(),
			GroupCommitFlushes: 0,
			BackfillUpdates:    backfills,
			ProcIO:             io,
		}
		buf.Reset()
		_ = enc.Encode(&snap)
	}
}
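Standard Go tooling runs these; nothing project-specific is assumed beyond the package path (cmd/ingestor is a guess based on the ingestor references elsewhere in this change set):

go test -run '^$' -bench 'ParseProcSelfIOInto|StatsFileWriter_Tick' -benchmem ./cmd/ingestor/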

@@ -0,0 +1,51 @@
package main

import (
	"bufio"
	"strings"
	"testing"
)

// TestParseProcSelfIO_EmptyDoesNotMarkOK — #1167 must-fix #3: an empty file
// (or one with no recognised keys) MUST result in ok=false. Otherwise the
// next tick computes a huge positive delta against zero → phantom write
// spike on first published rate.
func TestParseProcSelfIO_EmptyDoesNotMarkOK(t *testing.T) {
	var s procIOSnapshot
	parseProcSelfIOInto(bufio.NewScanner(strings.NewReader("")), &s)
	if s.ok {
		t.Errorf("empty input must produce ok=false, got ok=true (phantom-spike risk)")
	}
}

// TestParseProcSelfIO_NoKnownKeysDoesNotMarkOK — same as above, but the file
// has lines with unrecognised keys (a future /proc schema change). MUST NOT
// be treated as a valid sample.
func TestParseProcSelfIO_NoKnownKeysDoesNotMarkOK(t *testing.T) {
	var s procIOSnapshot
	parseProcSelfIOInto(bufio.NewScanner(strings.NewReader("garbage_key: 42\nother: 99\n")), &s)
	if s.ok {
		t.Errorf("input without recognised keys must produce ok=false, got ok=true")
	}
}

// TestParseProcSelfIO_ValidSampleMarksOK — positive companion: a real
// /proc/self/io-shaped input MUST mark ok=true with the parsed counters.
func TestParseProcSelfIO_ValidSampleMarksOK(t *testing.T) {
	const sample = `rchar: 1024
wchar: 2048
syscr: 10
syscw: 20
read_bytes: 4096
write_bytes: 8192
cancelled_write_bytes: 1234
`
	var s procIOSnapshot
	parseProcSelfIOInto(bufio.NewScanner(strings.NewReader(sample)), &s)
	if !s.ok {
		t.Fatalf("valid sample must produce ok=true")
	}
	if s.readBytes != 4096 || s.writeBytes != 8192 || s.cancelledWrite != 1234 {
		t.Errorf("unexpected parsed counters: %+v", s)
	}
}
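parseProcSelfIOInto itself is outside this hunk; below is a minimal sketch consistent with the behaviour these tests pin down (ok flips true only once a recognised key parses). The key set mirrors the fixtures above; everything else, including the strconv import, is an assumption:

// Sketch only; written to satisfy the tests above, not the committed parser.
func parseProcSelfIOIntoSketch(sc *bufio.Scanner, out *procIOSnapshot) {
	for sc.Scan() {
		k, v, found := strings.Cut(sc.Text(), ": ")
		if !found {
			continue
		}
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			continue
		}
		switch k {
		case "read_bytes":
			out.readBytes, out.ok = n, true
		case "write_bytes":
			out.writeBytes, out.ok = n, true
		case "cancelled_write_bytes":
			out.cancelledWrite, out.ok = n, true
		case "syscr":
			out.syscR, out.ok = n, true
		case "syscw":
			out.syscW, out.ok = n, true
		}
	}
}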

@@ -0,0 +1,67 @@
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
	"time"
)

// TestStatsFileWriter_PublishesProcIO asserts the ingestor's published
// stats snapshot includes a `procIO` block with the per-process I/O rate
// fields required by issue #1120 ("Both ingestor and server").
func TestStatsFileWriter_PublishesProcIO(t *testing.T) {
	if _, err := os.Stat("/proc/self/io"); err != nil {
		t.Skip("skip: /proc/self/io unavailable on this host")
	}
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	store, err := OpenStore(filepath.Join(dir, "test.db"))
	if err != nil {
		t.Fatalf("OpenStore: %v", err)
	}
	defer store.Close()

	StartStatsFileWriter(store, 50*time.Millisecond)

	// Wait for at least 2 ticks so the writer has had a chance to populate
	// procIO rates from a delta.
	deadline := time.Now().Add(3 * time.Second)
	var snap map[string]interface{}
	for time.Now().Before(deadline) {
		time.Sleep(75 * time.Millisecond)
		b, err := os.ReadFile(statsPath)
		if err != nil {
			continue
		}
		if err := json.Unmarshal(b, &snap); err != nil {
			continue
		}
		if _, ok := snap["procIO"]; ok {
			break
		}
	}

	pio, ok := snap["procIO"].(map[string]interface{})
	if !ok {
		t.Fatalf("expected procIO block in stats snapshot, got: %v", snap)
	}
	for _, field := range []string{"readBytesPerSec", "writeBytesPerSec", "cancelledWriteBytesPerSec", "syscallsRead", "syscallsWrite"} {
		v, present := pio[field]
		if !present {
			t.Errorf("procIO missing field %q", field)
			continue
		}
		// #1167 must-fix #5: assert the field actually decodes as a JSON
		// number, not just that the key exists. An empty PerfIOSample{}
		// substruct would still serialise the keys since the inner numeric
		// fields lack omitempty — without this Kind check the test would
		// silently pass on an empty struct regression.
		if _, isFloat := v.(float64); !isFloat {
			t.Errorf("procIO[%q] expected JSON number (float64), got %T (%v)", field, v, v)
		}
	}
}

@@ -0,0 +1,106 @@
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
	"time"
)

// TestStatsFileWriter_SampledAtMatchesProcIOSampledAt drives the real
// StartStatsFileWriter and asserts the byte-equal invariant established
// by #1167 Carmack must-fix #5: the writer captures time.Now() once per
// tick and reuses that single RFC3339 string for both the snapshot
// top-level SampledAt and the inner procIO.SampledAt. If a future change
// reintroduces two independent time.Now() calls — or, equivalently,
// reverts procIORate to format procIO.SampledAt from its own
// (independently-sampled) `cur.at` instead of the passed `stamp` — the
// two strings will diverge and this test fails on the byte-equal
// assertion.
//
// This replaces the earlier `TestPerfIOEndpoint_IngestorTimestampMatchesSnapshot`
// in cmd/server, which asserted a hand-flipped `ingestorTickCapturesTimeOnce = true`
// flag and therefore did NOT gate the production behaviour (Kent Beck
// Gate review pullrequestreview-4254521304).
//
// Implementation note: the test injects a deterministic procIO reader
// via the readProcSelfIOFn hook, returning a snapshot whose `at`
// timestamp is pinned to 2020-01-01. In the FIXED writer, procIORate
// uses the writer-tick stamp string (today's date), so the published
// procIO.SampledAt equals snap.SampledAt byte-for-byte. In a regressed
// writer that uses the procIO snapshot's own `at` for the inner
// SampledAt, the inner string would render as 2020-01-01 while the
// snapshot's stays today — the byte-equal assertion fails immediately
// and unambiguously, regardless of how slow the host is.
func TestStatsFileWriter_SampledAtMatchesProcIOSampledAt(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	store, err := OpenStore(filepath.Join(dir, "test.db"))
	if err != nil {
		t.Fatalf("OpenStore: %v", err)
	}
	defer store.Close()

	// Inject a deterministic procIO reader. `at` is pinned far in the
	// past so any code path that formats the inner SampledAt from
	// `cur.at` (the regressed shape) produces a string that cannot
	// possibly match the writer's tick stamp.
	origFn := readProcSelfIOFn
	t.Cleanup(func() { readProcSelfIOFn = origFn })
	pinnedAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	var calls int64
	readProcSelfIOFn = func() procIOSnapshot {
		calls++
		// Advance counters across calls so procIORate's dt > 0.001
		// gate passes and a non-nil PerfIOSample is published. Each
		// call advances `at` by 1s over the previous one, so the
		// computed dt is positive and stable.
		return procIOSnapshot{
			at:             pinnedAt.Add(time.Duration(calls) * time.Second),
			readBytes:      1000 * calls,
			writeBytes:     2000 * calls,
			cancelledWrite: 0,
			syscR:          10 * calls,
			syscW:          20 * calls,
			ok:             true,
		}
	}

	StartStatsFileWriter(store, 50*time.Millisecond)

	// Wait for the file to land with a populated procIO block.
	deadline := time.Now().Add(3 * time.Second)
	var snap map[string]interface{}
	for time.Now().Before(deadline) {
		time.Sleep(75 * time.Millisecond)
		b, err := os.ReadFile(statsPath)
		if err != nil {
			continue
		}
		if err := json.Unmarshal(b, &snap); err != nil {
			continue
		}
		if _, ok := snap["procIO"].(map[string]interface{}); ok {
			break
		}
	}

	topSampledAt, ok := snap["sampledAt"].(string)
	if !ok || topSampledAt == "" {
		t.Fatalf("expected snapshot.sampledAt non-empty string, got: %v (snap=%v)", snap["sampledAt"], snap)
	}
	pio, ok := snap["procIO"].(map[string]interface{})
	if !ok {
		t.Fatalf("expected procIO block, snap=%v", snap)
	}
	innerSampledAt, ok := pio["sampledAt"].(string)
	if !ok || innerSampledAt == "" {
		t.Fatalf("expected procIO.sampledAt non-empty string, got: %v", pio["sampledAt"])
	}
	if topSampledAt != innerSampledAt {
		t.Errorf("snapshot.sampledAt != procIO.sampledAt (writer reverted to two independent timestamps?)\n top: %q\n inner: %q", topSampledAt, innerSampledAt)
	}
}
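The readProcSelfIOFn seam the test reassigns is declared elsewhere; presumably a package-level function variable along these lines (only the name and the procIOSnapshot return type are confirmed by this hunk; the default target readProcSelfIO is a guess):

// Assumed declaration: a swappable seam so tests can inject a
// deterministic /proc/self/io reader in place of the real one.
var readProcSelfIOFn = readProcSelfIO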

@@ -0,0 +1,89 @@
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// handleBackup streams a consistent SQLite snapshot of the analyzer DB.
//
// Requires API-key authentication (mounted via requireAPIKey in routes.go).
//
// Strategy: SQLite's `VACUUM INTO 'path'` produces an atomic, defragmented
// copy of the current database into a new file. It runs at read isolation
// against the source DB (works on our read-only connection) and never
// blocks concurrent writers — the ingestor keeps writing to the WAL while
// the snapshot is taken from a consistent read transaction.
//
// Response:
//
//	200 OK
//	Content-Type: application/octet-stream
//	Content-Disposition: attachment; filename="corescope-backup-<unix>.db"
//	<body: complete SQLite database file>
//
// The temp directory is removed after the response is fully written,
// regardless of whether the client successfully consumed the stream.
func (s *Server) handleBackup(w http.ResponseWriter, r *http.Request) {
	if s.db == nil || s.db.conn == nil {
		writeError(w, http.StatusServiceUnavailable, "database unavailable")
		return
	}

	ts := time.Now().UTC().Unix()
	clientIP := r.Header.Get("X-Forwarded-For")
	if clientIP == "" {
		clientIP = r.RemoteAddr
	}
	log.Printf("[backup] generating backup for client %s", clientIP)

	// Stage the snapshot in the OS temp dir so we never touch the live DB
	// directory (avoids confusing operators / accidental WAL clobber).
	tmpDir, err := os.MkdirTemp("", "corescope-backup-")
	if err != nil {
		writeError(w, http.StatusInternalServerError, "tempdir failed: "+err.Error())
		return
	}
	defer func() {
		if rmErr := os.RemoveAll(tmpDir); rmErr != nil {
			log.Printf("[backup] cleanup error: %v", rmErr)
		}
	}()

	snapshotPath := filepath.Join(tmpDir, fmt.Sprintf("corescope-backup-%d.db", ts))

	// SQLite parses the path literal — escape any single quotes defensively.
	// (os.MkdirTemp output won't contain quotes, but be paranoid for future-proofing.)
	escaped := strings.ReplaceAll(snapshotPath, "'", "''")
	if _, err := s.db.conn.ExecContext(r.Context(), fmt.Sprintf("VACUUM INTO '%s'", escaped)); err != nil {
		writeError(w, http.StatusInternalServerError, "snapshot failed: "+err.Error())
		return
	}

	f, err := os.Open(snapshotPath)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "open snapshot failed: "+err.Error())
		return
	}
	defer f.Close()

	stat, err := f.Stat()
	if err == nil {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", stat.Size()))
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"corescope-backup-%d.db\"", ts))
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(http.StatusOK)

	if _, err := io.Copy(w, f); err != nil {
		// Headers already flushed; just log. Client will see truncated stream.
		log.Printf("[backup] stream error: %v", err)
	}
}
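A minimal client-side sketch (the /api/backup path and X-API-Key header are confirmed by the tests below; the function itself is illustrative and not part of the change set):

// Illustrative download client using only the handler's documented contract.
func downloadBackup(baseURL, apiKey, dest string) error {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/backup", nil)
	if err != nil {
		return err
	}
	req.Header.Set("X-API-Key", apiKey)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("backup: unexpected status %d", resp.StatusCode)
	}
	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}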

@@ -0,0 +1,55 @@
package main

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// sqliteMagic is the 16-byte file header identifying a valid SQLite 3 database.
// See https://www.sqlite.org/fileformat.html#magic_header_string
const sqliteMagic = "SQLite format 3\x00"

func TestBackupRequiresAPIKey(t *testing.T) {
	_, router := setupTestServerWithAPIKey(t, "test-secret-key-strong-enough")

	req := httptest.NewRequest("GET", "/api/backup", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Fatalf("expected 401 without API key, got %d (body: %s)", w.Code, w.Body.String())
	}
}

func TestBackupReturnsValidSQLiteSnapshot(t *testing.T) {
	const apiKey = "test-secret-key-strong-enough"
	_, router := setupTestServerWithAPIKey(t, apiKey)

	req := httptest.NewRequest("GET", "/api/backup", nil)
	req.Header.Set("X-API-Key", apiKey)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String())
	}

	ct := w.Header().Get("Content-Type")
	if ct != "application/octet-stream" {
		t.Errorf("expected Content-Type application/octet-stream, got %q", ct)
	}

	cd := w.Header().Get("Content-Disposition")
	if !strings.HasPrefix(cd, "attachment;") || !strings.Contains(cd, "filename=\"corescope-backup-") || !strings.HasSuffix(cd, ".db\"") {
		t.Errorf("expected Content-Disposition attachment with corescope-backup-<ts>.db filename, got %q", cd)
	}

	body := w.Body.Bytes()
	if len(body) < len(sqliteMagic) {
		t.Fatalf("backup body too short (%d bytes) — expected SQLite file", len(body))
	}
	if got := string(body[:len(sqliteMagic)]); got != sqliteMagic {
		t.Fatalf("expected SQLite magic header %q, got %q", sqliteMagic, got)
	}
}

@@ -0,0 +1,168 @@
package main

import (
	"encoding/json"
	"testing"
	"time"
)

var _ = time.Second // suppress unused import

// Helper to create a minimal PacketStore with GRP_TXT packets for channel analytics testing.
func newChannelTestStore(packets []*StoreTx) *PacketStore {
	ps := &PacketStore{
		packets:         packets,
		byHash:          make(map[string]*StoreTx),
		byTxID:          make(map[int]*StoreTx),
		byObsID:         make(map[int]*StoreObs),
		byObserver:      make(map[string][]*StoreObs),
		byNode:          make(map[string][]*StoreTx),
		byPathHop:       make(map[string][]*StoreTx),
		nodeHashes:      make(map[string]map[string]bool),
		byPayloadType:   make(map[int][]*StoreTx),
		rfCache:         make(map[string]*cachedResult),
		topoCache:       make(map[string]*cachedResult),
		hashCache:       make(map[string]*cachedResult),
		collisionCache:  make(map[string]*cachedResult),
		chanCache:       make(map[string]*cachedResult),
		distCache:       make(map[string]*cachedResult),
		subpathCache:    make(map[string]*cachedResult),
		spIndex:         make(map[string]int),
		spTxIndex:       make(map[string][]*StoreTx),
		advertPubkeys:   make(map[string]int),
		lastSeenTouched: make(map[string]time.Time),
		clockSkew:       NewClockSkewEngine(),
	}
	ps.byPayloadType[5] = packets
	return ps
}

func makeGrpTx(channelHash int, channel, text, sender string) *StoreTx {
	decoded := map[string]interface{}{
		"type":        "CHAN",
		"channelHash": float64(channelHash),
		"channel":     channel,
		"text":        text,
		"sender":      sender,
	}
	b, _ := json.Marshal(decoded)
	pt := 5
	return &StoreTx{
		ID:          1,
		DecodedJSON: string(b),
		FirstSeen:   "2026-05-01T12:00:00Z",
		PayloadType: &pt,
	}
}

// TestComputeAnalyticsChannels_MergesEncryptedAndDecrypted verifies that packets
// with the same hash byte but different decryption status merge into ONE bucket.
func TestComputeAnalyticsChannels_MergesEncryptedAndDecrypted(t *testing.T) {
	// Hash 129 is the real hash for #wardriving: SHA256(SHA256("#wardriving")[:16])[0] = 129.
	// Some packets are decrypted (have a channel name), some are not (encrypted).
	packets := []*StoreTx{
		makeGrpTx(129, "#wardriving", "hello", "alice"),
		makeGrpTx(129, "#wardriving", "world", "bob"),
		makeGrpTx(129, "", "", ""), // encrypted — no channel name
		makeGrpTx(129, "", "", ""), // encrypted
	}

	store := newChannelTestStore(packets)
	result := store.computeAnalyticsChannels("", TimeWindow{})

	channels := result["channels"].([]map[string]interface{})
	if len(channels) != 1 {
		t.Fatalf("expected 1 channel bucket, got %d: %+v", len(channels), channels)
	}
	ch := channels[0]
	if ch["name"] != "#wardriving" {
		t.Errorf("expected name '#wardriving', got %q", ch["name"])
	}
	if ch["messages"] != 4 {
		t.Errorf("expected 4 messages, got %v", ch["messages"])
	}
	if ch["encrypted"] != false {
		t.Errorf("expected encrypted=false (some packets decrypted), got %v", ch["encrypted"])
	}
}

// TestComputeAnalyticsChannels_RejectsRainbowTableMismatch verifies that a packet
// with channelHash=72 but channel="#wardriving" (mismatch) does NOT create a
// "#wardriving" bucket — it falls into "ch72" instead.
func TestComputeAnalyticsChannels_RejectsRainbowTableMismatch(t *testing.T) {
	// Hash 72 is NOT the correct hash for #wardriving (which is 129).
	// This simulates a rainbow-table collision/mismatch.
	packets := []*StoreTx{
		makeGrpTx(72, "#wardriving", "ghost", "eve"),   // mismatch: hash 72 != wardriving's real hash
		makeGrpTx(129, "#wardriving", "real", "alice"), // correct match
	}

	store := newChannelTestStore(packets)
	result := store.computeAnalyticsChannels("", TimeWindow{})

	channels := result["channels"].([]map[string]interface{})
	if len(channels) != 2 {
		t.Fatalf("expected 2 channel buckets, got %d: %+v", len(channels), channels)
	}

	// Find the buckets
	var ch72, ch129 map[string]interface{}
	for _, ch := range channels {
		if ch["hash"] == "72" {
			ch72 = ch
		} else if ch["hash"] == "129" {
			ch129 = ch
		}
	}

	if ch72 == nil {
		t.Fatal("expected a bucket for hash 72")
	}
	if ch129 == nil {
		t.Fatal("expected a bucket for hash 129")
	}

	// ch72 should NOT be named "#wardriving" — it should be the placeholder
	if ch72["name"] == "#wardriving" {
		t.Errorf("hash 72 bucket should NOT be named '#wardriving' (rainbow-table mismatch rejected)")
	}
	if ch72["name"] != "ch72" {
		t.Errorf("expected hash 72 bucket named 'ch72', got %q", ch72["name"])
	}

	// ch129 should be named "#wardriving"
	if ch129["name"] != "#wardriving" {
		t.Errorf("expected hash 129 bucket named '#wardriving', got %q", ch129["name"])
	}
}

// TestChannelNameMatchesHash verifies the hash validation function.
func TestChannelNameMatchesHash(t *testing.T) {
	// #wardriving hashes to 129
	if !channelNameMatchesHash("#wardriving", "129") {
		t.Error("expected #wardriving to match hash 129")
	}
	if channelNameMatchesHash("#wardriving", "72") {
		t.Error("expected #wardriving to NOT match hash 72")
	}
	// Without the leading # it should also work
	if !channelNameMatchesHash("wardriving", "129") {
		t.Error("expected wardriving (without #) to match hash 129")
	}
}
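The hash relation these tests pin down (per the comments above, SHA256(SHA256(name)[:16])[0]) can be spot-checked with a sketch like this; it mirrors the stated formula only, not the committed channelNameMatchesHash (which, per the last assertion, also tolerates a missing leading '#'):

// Sketch of the derivation named in the comments above; crypto/sha256
// import assumed. Normalisation of the leading '#' is omitted here.
func channelHashByteSketch(name string) byte {
	inner := sha256.Sum256([]byte(name))
	outer := sha256.Sum256(inner[:16])
	return outer[0]
}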

// TestIsPlaceholderName verifies placeholder detection.
func TestIsPlaceholderName(t *testing.T) {
	if !isPlaceholderName("ch129") {
		t.Error("ch129 should be placeholder")
	}
	if !isPlaceholderName("ch0") {
		t.Error("ch0 should be placeholder")
	}
	if isPlaceholderName("#wardriving") {
		t.Error("#wardriving should NOT be placeholder")
	}
	if isPlaceholderName("Public") {
		t.Error("Public should NOT be placeholder")
	}
}

+19 -5
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/meshcore-analyzer/dbconfig"
 	"github.com/meshcore-analyzer/geofilter"
 )
 
@@ -70,6 +71,11 @@ type Config struct {
 
 	Timestamps *TimestampConfig `json:"timestamps,omitempty"`
 
+	// CORSAllowedOrigins is the list of origins permitted to make cross-origin
+	// requests. When empty (default), no Access-Control-* headers are sent,
+	// so browsers enforce same-origin policy. Set to ["*"] to allow all origins.
+	CORSAllowedOrigins []string `json:"corsAllowedOrigins,omitempty"`
+
 	DebugAffinity bool `json:"debugAffinity,omitempty"`
 
 	// ObserverBlacklist is a list of observer public keys to exclude from API
@@ -83,6 +89,9 @@ type Config struct {
 
 	ResolvedPath  *ResolvedPathConfig  `json:"resolvedPath,omitempty"`
 	NeighborGraph *NeighborGraphConfig `json:"neighborGraph,omitempty"`
+
+	// BatteryThresholds: voltage cutoffs for low/critical alerts (#663).
+	BatteryThresholds *BatteryThresholdsConfig `json:"batteryThresholds,omitempty"`
 }
 
 // weakAPIKeys is the blocklist of known default/example API keys that must be rejected.
@@ -140,11 +149,8 @@ type RetentionConfig struct {
 	MetricsDays int `json:"metricsDays"`
 }
 
-// DBConfig controls SQLite vacuum and maintenance behavior (#919).
-type DBConfig struct {
-	VacuumOnStartup        bool `json:"vacuumOnStartup"`        // one-time full VACUUM on startup if auto_vacuum is not INCREMENTAL
-	IncrementalVacuumPages int  `json:"incrementalVacuumPages"` // pages returned to OS per reaper cycle (default 1024)
-}
+// DBConfig is the shared SQLite vacuum/maintenance config (#919, #921).
+type DBConfig = dbconfig.DBConfig
 
 // IncrementalVacuumPages returns the configured pages per vacuum or 1024 default.
 func (c *Config) IncrementalVacuumPages() int {
@@ -218,6 +224,10 @@ type HealthThresholds struct {
 	InfraSilentHours  float64 `json:"infraSilentHours"`
 	NodeDegradedHours float64 `json:"nodeDegradedHours"`
 	NodeSilentHours   float64 `json:"nodeSilentHours"`
+	// RelayActiveHours: how recent a path-hop appearance must be for a
+	// repeater to be considered "actively relaying" vs only "alive
+	// (advert-only)". See issue #662. Defaults to 24h.
+	RelayActiveHours float64 `json:"relayActiveHours"`
 }
 
 // ThemeFile mirrors theme.json overlay.
@@ -286,6 +296,7 @@ func (c *Config) GetHealthThresholds() HealthThresholds {
 		InfraSilentHours:  72,
 		NodeDegradedHours: 1,
 		NodeSilentHours:   24,
+		RelayActiveHours:  24,
 	}
 	if c.HealthThresholds != nil {
 		if c.HealthThresholds.InfraDegradedHours > 0 {
@@ -300,6 +311,9 @@ func (c *Config) GetHealthThresholds() HealthThresholds {
 		if c.HealthThresholds.NodeSilentHours > 0 {
 			h.NodeSilentHours = c.HealthThresholds.NodeSilentHours
 		}
+		if c.HealthThresholds.RelayActiveHours > 0 {
+			h.RelayActiveHours = c.HealthThresholds.RelayActiveHours
+		}
 	}
 	return h
 }
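The new field as it would be set in code, together with the JSON key it maps to (the origin URL is a placeholder, not taken from the project):

// In config.json this marshals as {"corsAllowedOrigins": ["https://dashboard.example.org"]}.
cfg := Config{CORSAllowedOrigins: []string{"https://dashboard.example.org"}}
_ = cfg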

@@ -0,0 +1,66 @@
package main

import "net/http"

// corsMiddleware returns a middleware that sets CORS headers based on the
// configured allowed origins. When CORSAllowedOrigins is empty (default),
// no Access-Control-* headers are added, preserving browser same-origin policy.
func (s *Server) corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		origins := s.cfg.CORSAllowedOrigins
		if len(origins) == 0 {
			next.ServeHTTP(w, r)
			return
		}

		reqOrigin := r.Header.Get("Origin")
		if reqOrigin == "" {
			next.ServeHTTP(w, r)
			return
		}

		// Check if origin is allowed
		allowed := false
		wildcard := false
		for _, o := range origins {
			if o == "*" {
				allowed = true
				wildcard = true
				break
			}
			if o == reqOrigin {
				allowed = true
				break
			}
		}

		if !allowed {
			// Origin not in allowlist — don't add CORS headers
			if r.Method == http.MethodOptions {
				// Still reject preflight with 403
				w.WriteHeader(http.StatusForbidden)
				return
			}
			next.ServeHTTP(w, r)
			return
		}

		// Set CORS headers
		if wildcard {
			w.Header().Set("Access-Control-Allow-Origin", "*")
		} else {
			w.Header().Set("Access-Control-Allow-Origin", reqOrigin)
			w.Header().Set("Vary", "Origin")
		}
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")

		// Handle preflight
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}

		next.ServeHTTP(w, r)
	})
}
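Wiring sketch (the middleware signature above is standard net/http, so composition is plain function wrapping; the mux variable and route here are placeholders, not taken from routes.go):

// Illustrative composition: wrap the whole router so every route,
// including preflight OPTIONS, passes through the CORS check first.
mux := http.NewServeMux()
mux.Handle("/api/health", dummyHandler) // placeholder route
var handler http.Handler = srv.corsMiddleware(mux)
_ = handler // pass to http.Server{Handler: handler} or equivalent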

@@ -0,0 +1,149 @@
package main

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// newTestServerWithCORS creates a minimal Server with the given CORS config.
func newTestServerWithCORS(origins []string) *Server {
	cfg := &Config{CORSAllowedOrigins: origins}
	srv := &Server{cfg: cfg}
	return srv
}

// dummyHandler is a simple handler that writes 200 OK.
var dummyHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("ok"))
})

func TestCORS_DefaultNoHeaders(t *testing.T) {
	srv := newTestServerWithCORS(nil)
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("GET", "/api/health", nil)
	req.Header.Set("Origin", "https://evil.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("expected 200, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "" {
		t.Fatalf("expected no ACAO header, got %q", v)
	}
}

func TestCORS_AllowlistMatch(t *testing.T) {
	srv := newTestServerWithCORS([]string{"https://good.example"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("GET", "/api/health", nil)
	req.Header.Set("Origin", "https://good.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("expected 200, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "https://good.example" {
		t.Fatalf("expected origin echo, got %q", v)
	}
	if v := rr.Header().Get("Access-Control-Allow-Methods"); v != "GET, POST, OPTIONS" {
		t.Fatalf("expected methods header, got %q", v)
	}
	if v := rr.Header().Get("Access-Control-Allow-Headers"); v != "Content-Type, X-API-Key" {
		t.Fatalf("expected headers header, got %q", v)
	}
	if v := rr.Header().Get("Vary"); v != "Origin" {
		t.Fatalf("expected Vary: Origin, got %q", v)
	}
}

func TestCORS_AllowlistNoMatch(t *testing.T) {
	srv := newTestServerWithCORS([]string{"https://good.example"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("GET", "/api/health", nil)
	req.Header.Set("Origin", "https://evil.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("expected 200, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "" {
		t.Fatalf("expected no ACAO header for non-matching origin, got %q", v)
	}
}

func TestCORS_PreflightAllowed(t *testing.T) {
	srv := newTestServerWithCORS([]string{"https://good.example"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("OPTIONS", "/api/health", nil)
	req.Header.Set("Origin", "https://good.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != http.StatusNoContent {
		t.Fatalf("expected 204, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "https://good.example" {
		t.Fatalf("expected origin echo, got %q", v)
	}
}

func TestCORS_PreflightRejected(t *testing.T) {
	srv := newTestServerWithCORS([]string{"https://good.example"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("OPTIONS", "/api/health", nil)
	req.Header.Set("Origin", "https://evil.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != http.StatusForbidden {
		t.Fatalf("expected 403, got %d", rr.Code)
	}
}

func TestCORS_Wildcard(t *testing.T) {
	srv := newTestServerWithCORS([]string{"*"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("GET", "/api/health", nil)
	req.Header.Set("Origin", "https://anything.example")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("expected 200, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "*" {
		t.Fatalf("expected *, got %q", v)
	}
	// Wildcard should NOT set Vary: Origin
	if v := rr.Header().Get("Vary"); v == "Origin" {
		t.Fatalf("wildcard should not set Vary: Origin")
	}
}

func TestCORS_NoOriginHeader(t *testing.T) {
	srv := newTestServerWithCORS([]string{"https://good.example"})
	handler := srv.corsMiddleware(dummyHandler)

	req := httptest.NewRequest("GET", "/api/health", nil)
	// No Origin header
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("expected 200, got %d", rr.Code)
	}
	if v := rr.Header().Get("Access-Control-Allow-Origin"); v != "" {
		t.Fatalf("expected no ACAO without Origin header, got %q", v)
	}
}

@@ -42,7 +42,7 @@ func setupTestDBv2(t *testing.T) *DB {
 		id INTEGER PRIMARY KEY AUTOINCREMENT, raw_hex TEXT NOT NULL,
 		hash TEXT NOT NULL UNIQUE, first_seen TEXT NOT NULL,
 		route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
-		decoded_json TEXT, channel_hash TEXT DEFAULT NULL, created_at TEXT DEFAULT (datetime('now'))
+		decoded_json TEXT, channel_hash TEXT DEFAULT NULL, from_pubkey TEXT DEFAULT NULL, created_at TEXT DEFAULT (datetime('now'))
 	);
 	CREATE TABLE observations (
 		id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -50,6 +50,18 @@ func setupTestDBv2(t *testing.T) *DB {
 		observer_id TEXT, observer_name TEXT, direction TEXT,
 		snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp INTEGER NOT NULL, raw_hex TEXT
 	);
+	CREATE TRIGGER IF NOT EXISTS test_from_pubkey_advert
+	AFTER INSERT ON transmissions
+	FOR EACH ROW
+	WHEN NEW.from_pubkey IS NULL AND NEW.payload_type = 4 AND NEW.decoded_json IS NOT NULL
+		AND json_extract(NEW.decoded_json, '$.pubKey') IS NOT NULL
+		AND json_extract(NEW.decoded_json, '$.pubKey') <> ''
+	BEGIN
+		UPDATE transmissions
+		SET from_pubkey = json_extract(NEW.decoded_json, '$.pubKey')
+		WHERE id = NEW.id;
+	END;
+	CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey);
 	`
 	if _, err := conn.Exec(schema); err != nil {
 		t.Fatal(err)
@@ -2498,9 +2510,9 @@ func TestStoreGetAnalyticsChannelsNumericHash(t *testing.T) {
 	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
 		VALUES (5, 1, 10.0, -90, '[]', ?)`, recentEpoch)
 
-	// Also a decrypted CHAN with numeric channelHash
+	// Also a decrypted CHAN with numeric channelHash — use hash 198 which is the real hash for #general
 	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
-		VALUES ('DD03', 'chan_num_hash_3', ?, 1, 5, '{"type":"CHAN","channel":"general","channelHash":97,"channelHashHex":"61","text":"hello","sender":"Alice"}')`, recent)
+		VALUES ('DD03', 'chan_num_hash_3', ?, 1, 5, '{"type":"CHAN","channel":"general","channelHash":198,"channelHashHex":"C6","text":"hello","sender":"Alice"}')`, recent)
 	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
 		VALUES (6, 1, 12.0, -88, '[]', ?)`, recentEpoch)
 
@@ -2509,8 +2521,8 @@ func TestStoreGetAnalyticsChannelsNumericHash(t *testing.T) {
 	result := store.GetAnalyticsChannels("")
 
 	channels := result["channels"].([]map[string]interface{})
-	if len(channels) < 2 {
-		t.Errorf("expected at least 2 channels (hash 97 + hash 42), got %d", len(channels))
+	if len(channels) < 3 {
+		t.Errorf("expected at least 3 channels (hash 97 + hash 42 + hash 198), got %d", len(channels))
 	}
 
 	// Verify the numeric-hash channels we inserted have proper hashes (not "?")
@@ -2531,13 +2543,13 @@ func TestStoreGetAnalyticsChannelsNumericHash(t *testing.T) {
 		t.Error("expected to find channel with hash '42' (numeric channelHash parsing)")
 	}
 
-	// Verify the decrypted CHAN channel has the correct name
+	// Verify the decrypted CHAN channel has the correct name (now at hash 198)
 	foundGeneral := false
 	for _, ch := range channels {
 		if ch["name"] == "general" {
 			foundGeneral = true
-			if ch["hash"] != "97" {
-				t.Errorf("expected hash '97' for general channel, got %v", ch["hash"])
+			if ch["hash"] != "198" {
+				t.Errorf("expected hash '198' for general channel, got %v", ch["hash"])
 			}
 		}
 	}

+104 -42
@@ -170,6 +170,7 @@ type Observer struct {
 	BatteryMv  *int     `json:"battery_mv"`
 	UptimeSecs *int64   `json:"uptime_secs"`
 	NoiseFloor *float64 `json:"noise_floor"`
+	LastPacketAt *string `json:"last_packet_at"`
 }
 
 // Transmission represents a row from the transmissions table.
@@ -578,8 +579,10 @@ func (db *DB) buildPacketWhere(q PacketQuery) ([]string, []interface{}) {
 	}
 	if q.Node != "" {
 		pk := db.resolveNodePubkey(q.Node)
-		where = append(where, "decoded_json LIKE ?")
-		args = append(args, "%"+pk+"%")
+		// #1143: exact-match on the dedicated from_pubkey column instead of
+		// LIKE-on-JSON substring (adversarial spoof + same-name false positives).
+		where = append(where, "from_pubkey = ?")
+		args = append(args, pk)
 	}
 	return where, args
 }
@@ -622,8 +625,9 @@ func (db *DB) buildTransmissionWhere(q PacketQuery) ([]string, []interface{}) {
 	}
 	if q.Node != "" {
 		pk := db.resolveNodePubkey(q.Node)
-		where = append(where, "t.decoded_json LIKE ?")
-		args = append(args, "%"+pk+"%")
+		// #1143: exact-match on dedicated from_pubkey column.
+		where = append(where, "t.from_pubkey = ?")
+		args = append(args, pk)
 	}
 	if q.Channel != "" {
 		// channel_hash column is indexed for payload_type = 5; filter is exact match.
@@ -786,7 +790,7 @@ func (db *DB) GetNodes(limit, offset int, role, search, before, lastHeard, sortB
 	var total int
 	db.conn.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM nodes %s", w), args...).Scan(&total)
 
-	querySQL := fmt.Sprintf("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c FROM nodes %s ORDER BY %s LIMIT ? OFFSET ?", w, order)
+	querySQL := fmt.Sprintf("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert FROM nodes %s ORDER BY %s LIMIT ? OFFSET ?", w, order)
 	qArgs := append(args, limit, offset)
 
 	rows, err := db.conn.Query(querySQL, qArgs...)
@@ -812,7 +816,7 @@ func (db *DB) SearchNodes(query string, limit int) ([]map[string]interface{}, er
 	if limit <= 0 {
 		limit = 10
 	}
-	rows, err := db.conn.Query(`SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c
+	rows, err := db.conn.Query(`SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert
 		FROM nodes WHERE name LIKE ? OR public_key LIKE ? ORDER BY last_seen DESC LIMIT ?`,
 		"%"+query+"%", query+"%", limit)
 	if err != nil {
@@ -830,9 +834,58 @@ func (db *DB) SearchNodes(query string, limit int) ([]map[string]interface{}, er
 	return nodes, nil
 }
 
+// GetNodeByPrefix resolves a hex prefix (>=8 chars) to a unique node.
+// Returns (node, ambiguous, error). When multiple nodes share the prefix,
+// returns (nil, true, nil). Used by the short-URL feature (issue #772).
+//
+// Trade-off vs an opaque ID lookup table: prefixes are stable across
+// restarts, self-describing (no allocator needed), and resolve to the
+// authoritative pubkey on the server. Cost: ambiguity grows with the
+// node directory; we mitigate with a hard 8-hex-char (32-bit) minimum
+// and surface 409 Conflict when collisions occur.
+func (db *DB) GetNodeByPrefix(prefix string) (map[string]interface{}, bool, error) {
+	if len(prefix) < 8 {
+		return nil, false, nil
+	}
+	// Validate hex (avoid SQL LIKE wildcards leaking through).
+	for _, c := range prefix {
+		isHex := (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')
+		if !isHex {
+			return nil, false, nil
+		}
+	}
+	rows, err := db.conn.Query(
+		`SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert
		FROM nodes WHERE public_key LIKE ? LIMIT 2`,
+		prefix+"%",
+	)
+	if err != nil {
+		return nil, false, err
+	}
+	defer rows.Close()
+	var first map[string]interface{}
+	count := 0
+	for rows.Next() {
+		n := scanNodeRow(rows)
+		if n == nil {
+			continue
+		}
+		count++
+		if count == 1 {
+			first = n
+		} else {
+			return nil, true, nil
+		}
+	}
+	if count == 0 {
+		return nil, false, nil
+	}
+	return first, false, nil
+}
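Caller-side sketch of the (node, ambiguous, error) contract (the handler shape, route, and writeError usage here are illustrative; only the 409-on-ambiguity intent is stated in the comment above):

// Illustrative resolution flow for a short-URL handler.
node, ambiguous, err := db.GetNodeByPrefix(prefix)
switch {
case err != nil:
	writeError(w, http.StatusInternalServerError, err.Error())
case ambiguous:
	writeError(w, http.StatusConflict, "prefix matches multiple nodes")
case node == nil:
	writeError(w, http.StatusNotFound, "no node with that prefix")
default:
	// render/redirect using node["public_key"]
}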
+
 // GetNodeByPubkey returns a single node.
 func (db *DB) GetNodeByPubkey(pubkey string) (map[string]interface{}, error) {
-	rows, err := db.conn.Query("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c FROM nodes WHERE public_key = ?", pubkey)
+	rows, err := db.conn.Query("SELECT public_key, name, role, lat, lon, last_seen, first_seen, advert_count, battery_mv, temperature_c, foreign_advert FROM nodes WHERE public_key = ?", pubkey)
 	if err != nil {
 		return nil, err
 	}
@@ -844,27 +897,22 @@ func (db *DB) GetNodeByPubkey(pubkey string) (map[string]interface{}, error) {
 }
 
 
-// GetRecentTransmissionsForNode returns recent transmissions referencing a node (Node.js-compatible shape).
-func (db *DB) GetRecentTransmissionsForNode(pubkey string, name string, limit int) ([]map[string]interface{}, error) {
+// GetRecentTransmissionsForNode returns recent transmissions originated by a
+// node, identified by exact pubkey match on the indexed from_pubkey column
+// (#1143). The legacy `name` substring fallback was removed: it produced
+// same-name false positives and an adversarial spoof path where any node
+// could attribute its transmissions to a victim by naming itself with the
+// victim's pubkey. Pubkey is unique by design — that's the whole point.
+func (db *DB) GetRecentTransmissionsForNode(pubkey string, limit int) ([]map[string]interface{}, error) {
 	if limit <= 0 {
 		limit = 20
 	}
-	pk := "%" + pubkey + "%"
-	np := "%" + name + "%"
-
 	selectCols, observerJoin := db.transmissionBaseSQL()
-
-	var querySQL string
-	var args []interface{}
-	if name != "" {
-		querySQL = fmt.Sprintf("SELECT %s FROM transmissions t %s WHERE t.decoded_json LIKE ? OR t.decoded_json LIKE ? ORDER BY t.first_seen DESC LIMIT ?",
-			selectCols, observerJoin)
-		args = []interface{}{pk, np, limit}
-	} else {
-		querySQL = fmt.Sprintf("SELECT %s FROM transmissions t %s WHERE t.decoded_json LIKE ? ORDER BY t.first_seen DESC LIMIT ?",
-			selectCols, observerJoin)
-		args = []interface{}{pk, limit}
-	}
+	querySQL := fmt.Sprintf("SELECT %s FROM transmissions t %s WHERE t.from_pubkey = ? ORDER BY t.first_seen DESC LIMIT ?",
+		selectCols, observerJoin)
+	args := []interface{}{pubkey, limit}
 
 	rows, err := db.conn.Query(querySQL, args...)
 	if err != nil {
@@ -972,7 +1020,7 @@ func (db *DB) getObservationsForTransmissions(txIDs []int) map[int][]map[string]
 
 // GetObservers returns active observers (not soft-deleted) sorted by last_seen DESC.
 func (db *DB) GetObservers() ([]Observer, error) {
-	rows, err := db.conn.Query("SELECT id, name, iata, last_seen, first_seen, packet_count, model, firmware, client_version, radio, battery_mv, uptime_secs, noise_floor FROM observers WHERE inactive IS NULL OR inactive = 0 ORDER BY last_seen DESC")
+	rows, err := db.conn.Query("SELECT id, name, iata, last_seen, first_seen, packet_count, model, firmware, client_version, radio, battery_mv, uptime_secs, noise_floor, last_packet_at FROM observers WHERE inactive IS NULL OR inactive = 0 ORDER BY last_seen DESC")
 	if err != nil {
 		return nil, err
 	}
@@ -983,7 +1031,7 @@ func (db *DB) GetObservers() ([]Observer, error) {
 		var o Observer
 		var batteryMv, uptimeSecs sql.NullInt64
 		var noiseFloor sql.NullFloat64
-		if err := rows.Scan(&o.ID, &o.Name, &o.IATA, &o.LastSeen, &o.FirstSeen, &o.PacketCount, &o.Model, &o.Firmware, &o.ClientVersion, &o.Radio, &batteryMv, &uptimeSecs, &noiseFloor); err != nil {
+		if err := rows.Scan(&o.ID, &o.Name, &o.IATA, &o.LastSeen, &o.FirstSeen, &o.PacketCount, &o.Model, &o.Firmware, &o.ClientVersion, &o.Radio, &batteryMv, &uptimeSecs, &noiseFloor, &o.LastPacketAt); err != nil {
 			continue
 		}
 		if batteryMv.Valid {
@@ -1006,8 +1054,8 @@ func (db *DB) GetObserverByID(id string) (*Observer, error) {
 	var o Observer
 	var batteryMv, uptimeSecs sql.NullInt64
 	var noiseFloor sql.NullFloat64
-	err := db.conn.QueryRow("SELECT id, name, iata, last_seen, first_seen, packet_count, model, firmware, client_version, radio, battery_mv, uptime_secs, noise_floor FROM observers WHERE id = ?", id).
-		Scan(&o.ID, &o.Name, &o.IATA, &o.LastSeen, &o.FirstSeen, &o.PacketCount, &o.Model, &o.Firmware, &o.ClientVersion, &o.Radio, &batteryMv, &uptimeSecs, &noiseFloor)
+	err := db.conn.QueryRow("SELECT id, name, iata, last_seen, first_seen, packet_count, model, firmware, client_version, radio, battery_mv, uptime_secs, noise_floor, last_packet_at FROM observers WHERE id = ?", id).
+		Scan(&o.ID, &o.Name, &o.IATA, &o.LastSeen, &o.FirstSeen, &o.PacketCount, &o.Model, &o.Firmware, &o.ClientVersion, &o.Radio, &batteryMv, &uptimeSecs, &noiseFloor, &o.LastPacketAt)
 	if err != nil {
 		return nil, err
 	}
@@ -1055,6 +1103,17 @@ func (db *DB) GetObserverIdsForRegion(regionParam string) ([]string, error) {
 	return ids, nil
 }
 
+// normalizeRegionCodes parses a region query parameter into a list of upper-case
+// IATA codes. Returns nil to signal "no filter" (match all regions).
+//
+// Sentinel handling (issue #770): the frontend region filter dropdown labels its
+// catch-all option "All". When that option is selected the UI may send
+// ?region=All; older code interpreted that literally and tried to match an
+// IATA code "ALL", which never exists, returning an empty result set. Treat
+// "All" / "ALL" / "all" (case-insensitive, optionally surrounded by whitespace
+// or mixed with empty CSV slots) as equivalent to an empty value.
+//
+// Real IATA codes (e.g. "SJC", "PDX") still pass through unchanged.
 func normalizeRegionCodes(regionParam string) []string {
 	if regionParam == "" {
 		return nil
@@ -1063,9 +1122,13 @@ func normalizeRegionCodes(regionParam string) []string {
 	codes := make([]string, 0, len(tokens))
 	for _, token := range tokens {
 		code := strings.TrimSpace(strings.ToUpper(token))
-		if code != "" {
-			codes = append(codes, code)
+		if code == "" || code == "ALL" {
+			continue
 		}
+		codes = append(codes, code)
 	}
+	if len(codes) == 0 {
+		return nil
+	}
 	return codes
 }
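Behaviour examples, read directly off the branches above (illustrative comment, not part of the change):

// normalizeRegionCodes("")              == nil   (no filter)
// normalizeRegionCodes("all")           == nil   (sentinel, issue #770)
// normalizeRegionCodes("sjc,,ALL, pdx") == []string{"SJC", "PDX"}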

@@ -1711,16 +1774,16 @@ func (db *DB) QueryMultiNodePackets(pubkeys []string, limit, offset int, order,
 		order = "DESC"
 	}
 
-	// Build OR conditions for decoded_json LIKE %pubkey%
-	var conditions []string
+	// Build IN(?, ?, ...) on the dedicated from_pubkey column (#1143):
+	// exact match, indexed lookup, no JSON substring scan.
 	var args []interface{}
+	placeholders := make([]string, 0, len(pubkeys))
 	for _, pk := range pubkeys {
-		// Resolve pubkey to also check by name
 		resolved := db.resolveNodePubkey(pk)
-		conditions = append(conditions, "t.decoded_json LIKE ?")
-		args = append(args, "%"+resolved+"%")
+		args = append(args, resolved)
+		placeholders = append(placeholders, "?")
 	}
-	jsonWhere := "(" + strings.Join(conditions, " OR ") + ")"
+	pkWhere := "t.from_pubkey IN (" + strings.Join(placeholders, ",") + ")"
 
 	var timeFilters []string
 	if since != "" {
@@ -1732,7 +1795,7 @@ func (db *DB) QueryMultiNodePackets(pubkeys []string, limit, offset int, order,
 		args = append(args, until)
 	}
 
-	w := "WHERE " + jsonWhere
+	w := "WHERE " + pkWhere
 	if len(timeFilters) > 0 {
 		w += " AND " + strings.Join(timeFilters, " AND ")
 	}
@@ -1802,8 +1865,9 @@ func scanNodeRow(rows *sql.Rows) map[string]interface{} {
 	var advertCount int
 	var batteryMv sql.NullInt64
 	var temperatureC sql.NullFloat64
+	var foreign sql.NullInt64
 
-	if err := rows.Scan(&pk, &name, &role, &lat, &lon, &lastSeen, &firstSeen, &advertCount, &batteryMv, &temperatureC); err != nil {
+	if err := rows.Scan(&pk, &name, &role, &lat, &lon, &lastSeen, &firstSeen, &advertCount, &batteryMv, &temperatureC, &foreign); err != nil {
 		return nil
 	}
 	m := map[string]interface{}{
@@ -1818,6 +1882,7 @@ func scanNodeRow(rows *sql.Rows) map[string]interface{} {
 		"last_heard":             nullStr(lastSeen),
 		"hash_size":              nil,
 		"hash_size_inconsistent": false,
+		"foreign":                foreign.Valid && foreign.Int64 != 0,
 	}
 	if batteryMv.Valid {
 		m["battery_mv"] = int(batteryMv.Int64)
@@ -1872,11 +1937,10 @@ func nullInt(ni sql.NullInt64) interface{} {
 // Returns the number of transmissions deleted.
 // Opens a separate read-write connection since the main connection is read-only.
 func (db *DB) PruneOldPackets(days int) (int64, error) {
-	rw, err := openRW(db.path)
+	rw, err := cachedRW(db.path)
 	if err != nil {
 		return 0, err
 	}
-	defer rw.Close()
 
 	cutoff := time.Now().UTC().AddDate(0, 0, -days).Format(time.RFC3339)
 	tx, err := rw.Begin()
@@ -2219,11 +2283,10 @@ func (db *DB) GetMetricsSummary(since string) ([]MetricsSummaryRow, error) {
 
 // PruneOldMetrics deletes observer_metrics rows older than retentionDays.
 func (db *DB) PruneOldMetrics(retentionDays int) (int64, error) {
-	rw, err := openRW(db.path)
+	rw, err := cachedRW(db.path)
 	if err != nil {
 		return 0, err
 	}
-	defer rw.Close()
 
 	cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format(time.RFC3339)
 	res, err := rw.Exec(`DELETE FROM observer_metrics WHERE timestamp < ?`, cutoff)
@@ -2246,11 +2309,10 @@ func (db *DB) RemoveStaleObservers(observerDays int) (int64, error) {
 	if observerDays <= -1 {
 		return 0, nil // keep forever
 	}
-	rw, err := openRW(db.path)
+	rw, err := cachedRW(db.path)
 	if err != nil {
 		return 0, err
 	}
-	defer rw.Close()
 
 	cutoff := time.Now().UTC().AddDate(0, 0, -observerDays).Format(time.RFC3339)
 	res, err := rw.Exec(`UPDATE observers SET inactive = 1 WHERE last_seen < ? AND (inactive IS NULL OR inactive = 0)`, cutoff)

+96 -8
@@ -32,7 +32,8 @@ func setupTestDB(t *testing.T) *DB {
 		first_seen TEXT,
 		advert_count INTEGER DEFAULT 0,
 		battery_mv INTEGER,
-		temperature_c REAL
+		temperature_c REAL,
+		foreign_advert INTEGER DEFAULT 0
 	);
 
 	CREATE TABLE observers (
@@ -49,7 +50,8 @@ func setupTestDB(t *testing.T) *DB {
 		battery_mv INTEGER,
 		uptime_secs INTEGER,
 		noise_floor REAL,
-		inactive INTEGER DEFAULT 0
+		inactive INTEGER DEFAULT 0,
+		last_packet_at TEXT DEFAULT NULL
 	);
 
 	CREATE TABLE transmissions (
@@ -62,6 +64,7 @@ func setupTestDB(t *testing.T) *DB {
 		payload_version INTEGER,
 		decoded_json TEXT,
 		channel_hash TEXT DEFAULT NULL,
+		from_pubkey TEXT DEFAULT NULL,
 		created_at TEXT DEFAULT (datetime('now'))
 	);
 
@@ -94,6 +97,29 @@ func setupTestDB(t *testing.T) *DB {
 
 	CREATE INDEX IF NOT EXISTS idx_observer_metrics_timestamp ON observer_metrics(timestamp);
 
+	-- Auto-populate from_pubkey for ADVERT rows so existing test fixtures
+	-- (which only set decoded_json) still attribute correctly under #1143's
+	-- exact-match column. Production migration handles legacy data; the
+	-- ingestor sets the column at write time.
+	--
+	-- m4 alignment: prod ingest leaves from_pubkey NULL when pubKey is
+	-- missing or empty (cmd/ingestor/db.go ~1289 guards PubKey != empty-string).
+	-- The trigger mirrors that: only assign when json_extract yields a
+	-- non-empty string. json_extract returns NULL for missing keys, so
+	-- the explicit IS NOT NULL AND <> empty-string guard catches the empty-string
+	-- case too. UPDATE only when we have something to write.
+	CREATE TRIGGER IF NOT EXISTS test_from_pubkey_advert
+	AFTER INSERT ON transmissions
+	FOR EACH ROW
+	WHEN NEW.from_pubkey IS NULL AND NEW.payload_type = 4 AND NEW.decoded_json IS NOT NULL
+		AND json_extract(NEW.decoded_json, '$.pubKey') IS NOT NULL
+		AND json_extract(NEW.decoded_json, '$.pubKey') <> ''
+	BEGIN
+		UPDATE transmissions
+		SET from_pubkey = json_extract(NEW.decoded_json, '$.pubKey')
+		WHERE id = NEW.id;
+	END;
+	CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey);
 	`
 	if _, err := conn.Exec(schema); err != nil {
 		t.Fatal(err)
@@ -127,13 +153,13 @@ func seedTestData(t *testing.T, db *DB) {
 		VALUES ('1122334455667788', 'TestRoom', 'room', 37.4, -121.9, ?, '2026-01-01T00:00:00Z', 5)`, twoDaysAgo)
 
 	// Seed transmissions
-	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
-		VALUES ('AABB', 'abc123def4567890', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000000,"timestampISO":"2023-11-14T22:13:20.000Z","signature":"abcdef","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}', '#test')`, recent)
+	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash, from_pubkey)
+		VALUES ('AABB', 'abc123def4567890', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000000,"timestampISO":"2023-11-14T22:13:20.000Z","signature":"abcdef","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}', '#test', 'aabbccdd11223344')`, recent)
 	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, channel_hash)
 		VALUES ('CCDD', '1234567890abcdef', ?, 1, 5, '{"type":"CHAN","channel":"#test","text":"Hello: World","sender":"TestUser"}', '#test')`, yesterday)
 	// Second ADVERT for same node with different hash_size (raw_hex byte 0x1F → hs=1 vs 0xBB → hs=3)
-	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
-		VALUES ('AA1F', 'def456abc1230099', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000100,"timestampISO":"2023-11-14T22:14:40.000Z","signature":"fedcba","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}')`, yesterday)
+	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json, from_pubkey)
+		VALUES ('AA1F', 'def456abc1230099', ?, 1, 4, '{"pubKey":"aabbccdd11223344","name":"TestRepeater","type":"ADVERT","timestamp":1700000100,"timestampISO":"2023-11-14T22:14:40.000Z","signature":"fedcba","flags":{"isRepeater":true},"lat":37.5,"lon":-122.0}', 'aabbccdd11223344')`, yesterday)
 
 	// Seed observations (use unix timestamps)
 	// resolved_path contains full pubkeys parallel to path_json hops
@@ -356,6 +382,10 @@ func TestGetObservers(t *testing.T) {
 	if observers[0].ID != "obs1" {
 		t.Errorf("expected obs1 first (most recent), got %s", observers[0].ID)
 	}
+	// last_packet_at should be nil since seedTestData doesn't set it
+	if observers[0].LastPacketAt != nil {
+		t.Errorf("expected nil LastPacketAt for obs1 from seed, got %v", *observers[0].LastPacketAt)
+	}
 }
 
 // Regression: GetObservers must exclude soft-deleted (inactive=1) rows.
@@ -395,6 +425,48 @@ func TestGetObserverByID(t *testing.T) {
 	if obs.ID != "obs1" {
 		t.Errorf("expected obs1, got %s", obs.ID)
 	}
+	// Verify last_packet_at is nil by default
+	if obs.LastPacketAt != nil {
+		t.Errorf("expected nil LastPacketAt, got %v", *obs.LastPacketAt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObserverLastPacketAt(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
seedTestData(t, db)
|
||||
|
||||
// Set last_packet_at for obs1
|
||||
ts := "2026-04-24T12:00:00Z"
|
||||
db.conn.Exec(`UPDATE observers SET last_packet_at = ? WHERE id = ?`, ts, "obs1")
|
||||
|
||||
// Verify via GetObservers
|
||||
observers, err := db.GetObservers()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var obs1 *Observer
|
||||
for i := range observers {
|
||||
if observers[i].ID == "obs1" {
|
||||
obs1 = &observers[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if obs1 == nil {
|
||||
t.Fatal("obs1 not found")
|
||||
}
|
||||
if obs1.LastPacketAt == nil || *obs1.LastPacketAt != ts {
|
||||
t.Errorf("expected LastPacketAt=%s via GetObservers, got %v", ts, obs1.LastPacketAt)
|
||||
}
|
||||
|
||||
// Verify via GetObserverByID
|
||||
obs, err := db.GetObserverByID("obs1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if obs.LastPacketAt == nil || *obs.LastPacketAt != ts {
|
||||
t.Errorf("expected LastPacketAt=%s via GetObserverByID, got %v", ts, obs.LastPacketAt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObserverByIDNotFound(t *testing.T) {
|
||||
@@ -1126,7 +1198,8 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
first_seen TEXT,
|
||||
advert_count INTEGER DEFAULT 0,
|
||||
battery_mv INTEGER,
|
||||
temperature_c REAL
|
||||
temperature_c REAL,
|
||||
foreign_advert INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE TABLE observers (
|
||||
@@ -1135,7 +1208,8 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
iata TEXT,
|
||||
last_seen TEXT,
|
||||
first_seen TEXT,
|
||||
packet_count INTEGER DEFAULT 0
|
||||
packet_count INTEGER DEFAULT 0,
|
||||
last_packet_at TEXT DEFAULT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE transmissions (
|
||||
@@ -1148,6 +1222,7 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
payload_version INTEGER,
|
||||
decoded_json TEXT,
|
||||
channel_hash TEXT DEFAULT NULL,
|
||||
from_pubkey TEXT DEFAULT NULL,
|
||||
created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
@@ -1164,6 +1239,19 @@ func setupTestDBV2(t *testing.T) *DB {
|
||||
timestamp INTEGER NOT NULL,
|
||||
raw_hex TEXT
|
||||
);
|
||||
|
||||
CREATE TRIGGER IF NOT EXISTS test_from_pubkey_advert
|
||||
AFTER INSERT ON transmissions
|
||||
FOR EACH ROW
|
||||
WHEN NEW.from_pubkey IS NULL AND NEW.payload_type = 4 AND NEW.decoded_json IS NOT NULL
|
||||
AND json_extract(NEW.decoded_json, '$.pubKey') IS NOT NULL
|
||||
AND json_extract(NEW.decoded_json, '$.pubKey') <> ''
|
||||
BEGIN
|
||||
UPDATE transmissions
|
||||
SET from_pubkey = json_extract(NEW.decoded_json, '$.pubKey')
|
||||
WHERE id = NEW.id;
|
||||
END;
|
||||
CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey);
|
||||
`
|
||||
if _, err := conn.Exec(schema); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@@ -106,6 +106,7 @@ type Payload struct {
|
||||
Tag uint32 `json:"tag,omitempty"`
|
||||
AuthCode uint32 `json:"authCode,omitempty"`
|
||||
TraceFlags *int `json:"traceFlags,omitempty"`
|
||||
SNRValues []float64 `json:"snrValues,omitempty"`
|
||||
RawHex string `json:"raw,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
@@ -407,6 +408,19 @@ func DecodePacket(hexString string, validateSignatures bool) (*DecodedPacket, er
|
||||
}
|
||||
// The header path hops count represents SNR entries = completed hops
|
||||
hopsCompleted := path.HashCount
|
||||
// Extract per-hop SNR from header path bytes (int8, quarter-dB encoding)
|
||||
if hopsCompleted > 0 && len(path.Hops) >= hopsCompleted {
|
||||
snrVals := make([]float64, 0, hopsCompleted)
|
||||
for i := 0; i < hopsCompleted; i++ {
|
||||
b, err := hex.DecodeString(path.Hops[i])
|
||||
if err == nil && len(b) == 1 {
|
||||
snrVals = append(snrVals, float64(int8(b[0]))/4.0)
|
||||
}
|
||||
}
|
||||
if len(snrVals) > 0 {
|
||||
payload.SNRValues = snrVals
|
||||
}
|
||||
}
|
||||
pathBytes, err := hex.DecodeString(payload.PathData)
|
||||
if err == nil && payload.TraceFlags != nil {
|
||||
// path_sz from flags byte is a power-of-two exponent per firmware:
|
||||
|
||||
@@ -440,3 +440,51 @@ func TestDecodeAdvertSignatureValidation(t *testing.T) {
|
||||
t.Error("expected SignatureValid to be nil when validation disabled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceSNRValues(t *testing.T) {
|
||||
// TRACE packet with 3 SNR bytes in header path:
|
||||
// SNR byte 0: 0x14 = int8(20) → 20/4.0 = 5.0 dB
|
||||
// SNR byte 1: 0xF4 = int8(-12) → -12/4.0 = -3.0 dB
|
||||
// SNR byte 2: 0x08 = int8(8) → 8/4.0 = 2.0 dB
|
||||
// header: DIRECT+TRACE = (0<<6)|(9<<2)|2 = 0x26
|
||||
// path_length: hash_size=0b00 (1-byte), hash_count=3 → 0x03
|
||||
hex := "2603" + "14F408" + // header + path_byte + 3 SNR bytes
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"00" + // flags=0 → path_sz=1
|
||||
"AABBCCDD" // 4 route hops (1-byte each)
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if pkt.Payload.SNRValues == nil {
|
||||
t.Fatal("expected SNRValues to be populated")
|
||||
}
|
||||
if len(pkt.Payload.SNRValues) != 3 {
|
||||
t.Fatalf("expected 3 SNR values, got %d", len(pkt.Payload.SNRValues))
|
||||
}
|
||||
expected := []float64{5.0, -3.0, 2.0}
|
||||
for i, want := range expected {
|
||||
if pkt.Payload.SNRValues[i] != want {
|
||||
t.Errorf("SNRValues[%d] = %v, want %v", i, pkt.Payload.SNRValues[i], want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodePacket_TraceNoSNRValues(t *testing.T) {
|
||||
// TRACE with 0 SNR bytes → SNRValues should be nil/empty
|
||||
hex := "2600" + // header + path_byte (0 hops)
|
||||
"01000000" + // tag
|
||||
"02000000" + // authCode
|
||||
"00" + // flags
|
||||
"AABB" // 2 route hops
|
||||
|
||||
pkt, err := DecodePacket(hex, false)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodePacket error: %v", err)
|
||||
}
|
||||
if len(pkt.Payload.SNRValues) != 0 {
|
||||
t.Errorf("expected empty SNRValues, got %v", pkt.Payload.SNRValues)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
// Package main — discovered channels (#688).
|
||||
//
|
||||
// When a decoded channel message text mentions a previously-unknown hashtag
|
||||
// channel (e.g. "Hey, I created new channel called #mesh, please join"), we
|
||||
// auto-register that hashtag so future traffic can be displayed. This file
|
||||
// owns the parsing helper plus the integration glue exposed via GetChannels.
|
||||
package main
|
||||
|
||||
import "regexp"
|
||||
|
||||
// hashtagRE matches MeshCore-style hashtag channel mentions inside free text.
|
||||
// A valid channel name starts with '#', followed by one or more letters,
|
||||
// digits, underscore, or dash. Trailing punctuation (.,!?:;) is excluded by
|
||||
// the character class.
|
||||
var hashtagRE = regexp.MustCompile(`#[A-Za-z0-9_\-]+`)
|
||||
|
||||
// extractHashtagsFromText scans a decoded message text and returns the unique
|
||||
// hashtag channel mentions found, in first-seen order. The leading '#' is
|
||||
// preserved so callers can match against canonical channel names directly.
|
||||
//
|
||||
// Examples:
|
||||
// extractHashtagsFromText("hi #mesh and #fun") => []string{"#mesh", "#fun"}
|
||||
// extractHashtagsFromText("nothing here") => nil
|
||||
// extractHashtagsFromText("dup #x and #x again") => []string{"#x"}
|
||||
//
|
||||
func extractHashtagsFromText(text string) []string {
|
||||
if text == "" {
|
||||
return nil
|
||||
}
|
||||
matches := hashtagRE.FindAllString(text, -1)
|
||||
if len(matches) == 0 {
|
||||
return nil
|
||||
}
|
||||
seen := make(map[string]struct{}, len(matches))
|
||||
out := make([]string, 0, len(matches))
|
||||
for _, m := range matches {
|
||||
if len(m) < 2 { // bare '#' guard (regex requires 1+ chars but be defensive)
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[m]; ok {
|
||||
continue
|
||||
}
|
||||
seen[m] = struct{}{}
|
||||
out = append(out, m)
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
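The integration glue that feeds extractHashtagsFromText into GetChannels is elsewhere in this PR; below is a minimal sketch of the merge step, assuming the map-based channel shape the tests that follow assert ("name", "discovered"). The helper name and its inputs are hypothetical.

// mergeDiscoveredSketch appends any hashtag mentioned in message text
// that isn't already a known channel, flagged discovered=true.
func mergeDiscoveredSketch(known []map[string]interface{}, texts []string) []map[string]interface{} {
	seen := map[string]bool{}
	for _, ch := range known {
		if n, _ := ch["name"].(string); n != "" {
			seen[n] = true
		}
	}
	for _, txt := range texts {
		for _, tag := range extractHashtagsFromText(txt) {
			if !seen[tag] {
				seen[tag] = true
				known = append(known, map[string]interface{}{
					"name":       tag,
					"discovered": true,
				})
			}
		}
	}
	return known
}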
@@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestExtractHashtagsFromText covers the parsing helper used to discover new
|
||||
// hashtag channels from decoded message text (issue #688).
|
||||
func TestExtractHashtagsFromText(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
in string
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "single mention from issue body",
|
||||
in: "Hey, I created new channel called #mesh, please join",
|
||||
want: []string{"#mesh"},
|
||||
},
|
||||
{
|
||||
name: "multiple mentions preserve order",
|
||||
in: "join #mesh and #wardriving today",
|
||||
want: []string{"#mesh", "#wardriving"},
|
||||
},
|
||||
{
|
||||
name: "dedup repeated mentions",
|
||||
in: "#x then #x again",
|
||||
want: []string{"#x"},
|
||||
},
|
||||
{
|
||||
name: "ignores trailing punctuation",
|
||||
in: "check #fun!",
|
||||
want: []string{"#fun"},
|
||||
},
|
||||
{
|
||||
name: "no hashtag returns nil",
|
||||
in: "nothing to see here",
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "bare # is not a channel",
|
||||
in: "issue #",
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := extractHashtagsFromText(tc.in)
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Fatalf("extractHashtagsFromText(%q): got %v, want %v", tc.in, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetChannels_DiscoversHashtagsFromMessages verifies that when a decoded
|
||||
// CHAN message body mentions a previously-unknown hashtag channel, that
|
||||
// channel is auto-registered in the GetChannels output (#688).
|
||||
func TestGetChannels_DiscoversHashtagsFromMessages(t *testing.T) {
|
||||
// One known channel (#general) where someone announces a new channel #mesh.
|
||||
pkt := makeGrpTx(198, "general", "Alice: Hey, I created new channel called #mesh, please join", "Alice")
|
||||
ps := newChannelTestStore([]*StoreTx{pkt})
|
||||
|
||||
channels := ps.GetChannels("")
|
||||
|
||||
var sawGeneral, sawMesh bool
|
||||
for _, ch := range channels {
|
||||
switch ch["name"] {
|
||||
case "general":
|
||||
sawGeneral = true
|
||||
case "#mesh":
|
||||
sawMesh = true
|
||||
if d, _ := ch["discovered"].(bool); !d {
|
||||
t.Errorf("expected discovered=true on #mesh, got %v", ch["discovered"])
|
||||
}
|
||||
}
|
||||
}
|
||||
if !sawGeneral {
|
||||
t.Error("expected the source channel 'general' in GetChannels output")
|
||||
}
|
||||
if !sawMesh {
|
||||
t.Errorf("expected discovered hashtag channel '#mesh' in GetChannels output; got %d channels: %+v", len(channels), channels)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestHandleNodes_ExposesForeignAdvertField asserts the /api/nodes response
|
||||
// surfaces the foreign_advert column as a boolean `foreign` field on each
|
||||
// node, so operators can see bridged/leaked nodes (#730).
|
||||
func TestHandleNodes_ExposesForeignAdvertField(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
conn := srv.db.conn
|
||||
|
||||
if _, err := conn.Exec(`INSERT INTO nodes
|
||||
(public_key, name, role, lat, lon, last_seen, first_seen, advert_count, foreign_advert)
|
||||
VALUES
|
||||
('PK_LOCAL', 'local-node', 'companion', 37.0, -122.0, '2026-01-01T00:00:00Z', '2026-01-01T00:00:00Z', 1, 0),
|
||||
('PK_FOREIGN', 'foreign-node', 'companion', 50.0, 10.0, '2026-01-01T00:00:00Z', '2026-01-01T00:00:00Z', 1, 1)`,
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/nodes?limit=100", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status=%d body=%s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp struct {
|
||||
Nodes []map[string]interface{} `json:"nodes"`
|
||||
}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
got := map[string]bool{}
|
||||
for _, n := range resp.Nodes {
|
||||
pk, _ := n["public_key"].(string)
|
||||
f, ok := n["foreign"].(bool)
|
||||
if !ok {
|
||||
t.Errorf("node %s: missing/non-bool 'foreign' field, got %T %v", pk, n["foreign"], n["foreign"])
|
||||
continue
|
||||
}
|
||||
got[pk] = f
|
||||
}
|
||||
if got["PK_LOCAL"] != false {
|
||||
t.Errorf("PK_LOCAL foreign=%v, want false", got["PK_LOCAL"])
|
||||
}
|
||||
if got["PK_FOREIGN"] != true {
|
||||
t.Errorf("PK_FOREIGN foreign=%v, want true", got["PK_FOREIGN"])
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,434 @@
|
||||
package main
|
||||
|
||||
// Tests for issue #1143: pubkey attribution must use exact-match on a
|
||||
// dedicated `from_pubkey` column, not `decoded_json LIKE '%pubkey%'`.
|
||||
//
|
||||
// These tests demonstrate the structural holes documented in #1143:
|
||||
// Hole 1: name-LIKE fallback surfaces same-name nodes
|
||||
// Hole 2a: an attacker can name themselves with someone else's pubkey
|
||||
// and get their transmissions attributed to the victim
|
||||
// Hole 2b: any 64-char hex substring inside decoded_json (path elements,
|
||||
// channel names, message bodies) produces false positives
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
const (
|
||||
pkVictim = "f7181c468dfe7c55aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||
pkAttacker = "deadbeefdeadbeefcccccccccccccccccccccccccccccccccccccccccccccccc"
|
||||
pkOther = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||
)
|
||||
|
||||
// seedAttribution inserts the standard adversarial fixture used by the
|
||||
// issue #1143 tests. It returns the victim pubkey for convenience.
|
||||
func seedAttribution(t *testing.T, db *DB) string {
|
||||
t.Helper()
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
|
||||
// (1) Legitimate ADVERT from the victim.
|
||||
mustExec(t, db, `INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, route_type, payload_type, decoded_json, from_pubkey)
|
||||
VALUES ('AA','h_victim_advert',?,1,4,
|
||||
'{"type":"ADVERT","pubKey":"`+pkVictim+`","name":"VictimNode"}',
|
||||
?)`, now, pkVictim)
|
||||
|
||||
// (2) Hole 1: a different node sharing the *display name* "VictimNode".
|
||||
mustExec(t, db, `INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, route_type, payload_type, decoded_json, from_pubkey)
|
||||
VALUES ('BB','h_namespoof_advert',?,1,4,
|
||||
'{"type":"ADVERT","pubKey":"`+pkOther+`","name":"VictimNode"}',
|
||||
?)`, now, pkOther)
|
||||
|
||||
// (3) Hole 2a: malicious node whose *name* is the victim's pubkey.
|
||||
// decoded_json contains pkVictim as a substring (in the name field),
|
||||
// but the actual originator is pkAttacker.
|
||||
mustExec(t, db, `INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, route_type, payload_type, decoded_json, from_pubkey)
|
||||
VALUES ('CC','h_spoof_advert',?,1,4,
|
||||
'{"type":"ADVERT","pubKey":"`+pkAttacker+`","name":"`+pkVictim+`"}',
|
||||
?)`, now, pkAttacker)
|
||||
|
||||
// (4) Hole 2b: free-text packet (e.g. channel message) whose body
|
||||
// coincidentally contains the victim's pubkey as a substring.
|
||||
// Real originator is pkAttacker; from_pubkey reflects that.
|
||||
mustExec(t, db, `INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, route_type, payload_type, decoded_json, from_pubkey)
|
||||
VALUES ('DD','h_freetext_msg',?,1,5,
|
||||
'{"type":"GRP_TXT","text":"hello `+pkVictim+` how are you"}',
|
||||
?)`, now, pkAttacker)
|
||||
|
||||
return pkVictim
|
||||
}
|
||||
|
||||
func mustExec(t *testing.T, db *DB, q string, args ...interface{}) {
|
||||
t.Helper()
|
||||
if _, err := db.conn.Exec(q, args...); err != nil {
|
||||
t.Fatalf("exec failed: %v\nquery: %s", err, q)
|
||||
}
|
||||
}
|
||||
|
||||
func hashesOf(rows []map[string]interface{}) []string {
|
||||
out := make([]string, 0, len(rows))
|
||||
for _, r := range rows {
|
||||
if h, ok := r["hash"].(string); ok {
|
||||
out = append(out, h)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestRecentTransmissions_Hole1_SameNameDifferentPubkey(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
victim := seedAttribution(t, db)
|
||||
|
||||
got, err := db.GetRecentTransmissionsForNode(victim, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hashes := hashesOf(got)
|
||||
for _, h := range hashes {
|
||||
if h == "h_namespoof_advert" {
|
||||
t.Fatalf("Hole 1: same-name node was attributed to the victim. got hashes=%v", hashes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecentTransmissions_Hole2a_PubkeyAsNameSpoof(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
victim := seedAttribution(t, db)
|
||||
|
||||
got, err := db.GetRecentTransmissionsForNode(victim, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hashes := hashesOf(got)
|
||||
for _, h := range hashes {
|
||||
if h == "h_spoof_advert" {
|
||||
t.Fatalf("Hole 2a: attacker who named themselves with victim's pubkey "+
|
||||
"was attributed to the victim. got hashes=%v", hashes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecentTransmissions_Hole2b_FreeTextHexFalsePositive(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
victim := seedAttribution(t, db)
|
||||
|
||||
got, err := db.GetRecentTransmissionsForNode(victim, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hashes := hashesOf(got)
|
||||
for _, h := range hashes {
|
||||
if h == "h_freetext_msg" {
|
||||
t.Fatalf("Hole 2b: free-text containing the victim's pubkey as a "+
|
||||
"substring produced a false positive. got hashes=%v", hashes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecentTransmissions_LegitimateAdvertReturned(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
victim := seedAttribution(t, db)
|
||||
|
||||
got, err := db.GetRecentTransmissionsForNode(victim, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hashes := hashesOf(got)
|
||||
found := false
|
||||
for _, h := range hashes {
|
||||
if h == "h_victim_advert" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("expected legitimate victim advert (h_victim_advert) in result, got %v", hashes)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Multi-pubkey OR query (#1143 — db.go:1785) ---
|
||||
|
||||
func TestQueryMultiNodePackets_ExactMatchOnly(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
seedAttribution(t, db)
|
||||
|
||||
// Query the victim's pubkey via the multi-node API. The malicious
|
||||
// "name = victim pubkey" row and the free-text row must NOT show up.
|
||||
res, err := db.QueryMultiNodePackets([]string{pkVictim}, 50, 0, "DESC", "", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hashes := hashesOf(res.Packets)
|
||||
for _, bad := range []string{"h_spoof_advert", "h_freetext_msg", "h_namespoof_advert"} {
|
||||
for _, h := range hashes {
|
||||
if h == bad {
|
||||
t.Fatalf("QueryMultiNodePackets returned spurious match %q (pubkey %s as substring); hashes=%v",
|
||||
bad, pkVictim, hashes)
|
||||
}
|
||||
}
|
||||
}
|
||||
// The legitimate one must still be present.
|
||||
if !contains(hashes, "h_victim_advert") {
|
||||
t.Fatalf("expected h_victim_advert in QueryMultiNodePackets result, got %v", hashes)
|
||||
}
|
||||
}
|
||||
|
||||
func contains(haystack []string, needle string) bool {
|
||||
for _, s := range haystack {
|
||||
if s == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// --- Index sanity check (#1143 perf): verify EXPLAIN QUERY PLAN uses the
|
||||
// new index, not a SCAN. ---
|
||||
|
||||
func TestFromPubkeyIndexUsed(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
mustExec(t, db, `CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey)`)
|
||||
|
||||
rows, err := db.conn.Query(
|
||||
`EXPLAIN QUERY PLAN SELECT id FROM transmissions WHERE from_pubkey = ?`,
|
||||
pkVictim)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rows.Close()
|
||||
plan := ""
|
||||
for rows.Next() {
|
||||
var id, parent, notused int
|
||||
var detail string
|
||||
if err := rows.Scan(&id, &parent, &notused, &detail); err == nil {
|
||||
plan += detail + "\n"
|
||||
}
|
||||
}
|
||||
if !strings.Contains(plan, "idx_transmissions_from_pubkey") {
|
||||
t.Fatalf("expected EXPLAIN QUERY PLAN to use idx_transmissions_from_pubkey, got:\n%s", plan)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFromPubkeyIndexUsedForInClause verifies the index is used for the
|
||||
// IN (?, ?, ...) query path used by QueryMultiNodePackets (db.go ~1787).
|
||||
// Coverage extension — the equality path is covered above; this asserts
// the multi-node path doesn't silently regress to a full scan if the
// planner ever stops using the index for set membership.
|
||||
func TestFromPubkeyIndexUsedForInClause(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
mustExec(t, db, `CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey)`)
|
||||
|
||||
rows, err := db.conn.Query(
|
||||
`EXPLAIN QUERY PLAN SELECT id FROM transmissions WHERE from_pubkey IN (?, ?)`,
|
||||
pkVictim, pkOther)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rows.Close()
|
||||
plan := ""
|
||||
for rows.Next() {
|
||||
var id, parent, notused int
|
||||
var detail string
|
||||
if err := rows.Scan(&id, &parent, &notused, &detail); err == nil {
|
||||
plan += detail + "\n"
|
||||
}
|
||||
}
|
||||
if !strings.Contains(plan, "idx_transmissions_from_pubkey") {
|
||||
t.Fatalf("expected EXPLAIN QUERY PLAN for IN(...) to use idx_transmissions_from_pubkey, got:\n%s", plan)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Migration / backfill ---
|
||||
|
||||
func TestBackfillFromPubkey_AdvertRowsPopulated(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dbPath := dir + "/test.db"
|
||||
|
||||
// Create a legacy-style DB: transmissions table WITHOUT from_pubkey,
|
||||
// then run ensureFromPubkeyColumn to ALTER it in.
|
||||
rw, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := rw.Exec(`CREATE TABLE transmissions (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
raw_hex TEXT, hash TEXT UNIQUE, first_seen TEXT,
|
||||
route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
|
||||
decoded_json TEXT, created_at TEXT
|
||||
)`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Two ADVERTs (different pubkeys) and a non-ADVERT.
|
||||
if _, err := rw.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, payload_type, decoded_json) VALUES
|
||||
('AA','m1','2026-01-01T00:00:00Z',4,'{"type":"ADVERT","pubKey":"`+pkVictim+`","name":"V"}'),
|
||||
('BB','m2','2026-01-01T00:00:00Z',4,'{"type":"ADVERT","pubKey":"`+pkOther+`","name":"O"}'),
|
||||
('CC','m3','2026-01-01T00:00:00Z',5,'{"type":"GRP_TXT","text":"hi"}')`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rw.Close()
|
||||
|
||||
if err := ensureFromPubkeyColumn(dbPath); err != nil {
|
||||
t.Fatalf("ensureFromPubkeyColumn: %v", err)
|
||||
}
|
||||
|
||||
// Run synchronously by calling the function directly.
|
||||
backfillFromPubkeyAsync(dbPath, 100, 0)
|
||||
|
||||
// Verify backfill populated the ADVERT rows.
|
||||
rw2, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rw2.Close()
|
||||
rows, err := rw2.Query("SELECT hash, from_pubkey FROM transmissions ORDER BY hash")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rows.Close()
|
||||
got := map[string]string{}
|
||||
for rows.Next() {
|
||||
var h string
|
||||
var pk sql.NullString
|
||||
if err := rows.Scan(&h, &pk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got[h] = pk.String
|
||||
}
|
||||
if got["m1"] != pkVictim {
|
||||
t.Errorf("m1 from_pubkey = %q, want %q", got["m1"], pkVictim)
|
||||
}
|
||||
if got["m2"] != pkOther {
|
||||
t.Errorf("m2 from_pubkey = %q, want %q", got["m2"], pkOther)
|
||||
}
|
||||
// Non-ADVERT row was not in the backfill scope; from_pubkey stays NULL.
|
||||
if got["m3"] != "" {
|
||||
t.Errorf("m3 from_pubkey = %q, want empty (NULL)", got["m3"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackfillFromPubkey_DoesNotBlockBoot exercises the async contract:
|
||||
// main.go (cmd/server/main.go) calls startFromPubkeyBackfill, which is the
|
||||
// SAME entry point used at production startup. The wrapper must dispatch
|
||||
// the backfill in a goroutine; if anyone removes the `go` keyword inside
|
||||
// startFromPubkeyBackfill, this test fails because the call no longer
|
||||
// returns within the 50ms boot dispatch budget. The test does NOT use `go`
|
||||
// itself — that would test only the test's own scheduler, not the
|
||||
// production code path (cycle-3 M1c).
|
||||
//
|
||||
// DO NOT t.Parallel — uses package-global atomics
|
||||
// (fromPubkeyBackfillTotal/Processed/Done). Concurrent tests would clobber
|
||||
// the resets (cycle-3 m1c).
|
||||
func TestBackfillFromPubkey_DoesNotBlockBoot(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dbPath := dir + "/async_boot.db"
|
||||
|
||||
rw, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := rw.Exec(`CREATE TABLE transmissions (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
raw_hex TEXT, hash TEXT UNIQUE, first_seen TEXT,
|
||||
route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
|
||||
decoded_json TEXT, created_at TEXT
|
||||
)`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Insert N=1000 legacy ADVERT rows. With chunkSize=100 + yield=100ms
|
||||
// between chunks, sync would be ~900ms; we assert dispatch is <50ms.
|
||||
tx, err := rw.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stmt, err := tx.Prepare(`INSERT INTO transmissions
|
||||
(raw_hex, hash, first_seen, payload_type, decoded_json) VALUES (?, ?, ?, 4, ?)`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
const N = 1000
|
||||
for i := 0; i < N; i++ {
|
||||
hash := fmt.Sprintf("h_async_boot_%d", i)
|
||||
dj := fmt.Sprintf(`{"type":"ADVERT","pubKey":"%s","name":"N%d"}`, pkVictim, i)
|
||||
if _, err := stmt.Exec("AA", hash, "2026-01-01T00:00:00Z", dj); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
stmt.Close()
|
||||
if err := tx.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rw.Close()
|
||||
|
||||
if err := ensureFromPubkeyColumn(dbPath); err != nil {
|
||||
t.Fatalf("ensureFromPubkeyColumn: %v", err)
|
||||
}
|
||||
|
||||
// Reset all backfill state — other tests may have set it.
|
||||
fromPubkeyBackfillReset()
|
||||
defer fromPubkeyBackfillReset()
|
||||
|
||||
// Dispatch via the production wrapper. startFromPubkeyBackfill is the
|
||||
// same entry point main.go calls at boot; it must launch the backfill
|
||||
// in a goroutine internally. We deliberately do NOT prefix `go` here —
|
||||
// if the wrapper is ever made synchronous, the dispatch budget below
|
||||
// fires first.
|
||||
t0 := time.Now()
|
||||
startFromPubkeyBackfill(dbPath, 100, 100*time.Millisecond)
|
||||
dispatchElapsed := time.Since(t0)
|
||||
|
||||
// (a) Boot-time dispatch budget: must return ~immediately.
|
||||
if dispatchElapsed > 50*time.Millisecond {
|
||||
t.Fatalf("backfill dispatch took %v (>50ms): not async — would block boot", dispatchElapsed)
|
||||
}
|
||||
|
||||
// (b) Eventual completion via the fromPubkeyBackfill snapshot.
|
||||
deadline := time.Now().Add(30 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
if _, _, done := fromPubkeyBackfillSnapshot(); done {
|
||||
break
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
if _, _, done := fromPubkeyBackfillSnapshot(); !done {
|
||||
t.Fatalf("backfill never flipped Done within 30s; dispatched=%v", dispatchElapsed)
|
||||
}
|
||||
|
||||
// (c) Backfill actually populated rows.
|
||||
rw2, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rw2.Close()
|
||||
var nullCount int
|
||||
if err := rw2.QueryRow(
|
||||
`SELECT COUNT(*) FROM transmissions WHERE payload_type = 4 AND from_pubkey IS NULL`,
|
||||
).Scan(&nullCount); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nullCount > 0 {
|
||||
t.Errorf("backfill left %d ADVERT rows with NULL from_pubkey", nullCount)
|
||||
}
|
||||
if _, processed, _ := fromPubkeyBackfillSnapshot(); processed != int64(N) {
|
||||
t.Errorf("fromPubkeyBackfillProcessed = %d, want %d", processed, N)
|
||||
}
|
||||
}
|
||||
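For orientation before the migration file: the shape of the fix in two query strings. The LIKE form is paraphrased from the legacy behaviour these tests document, not copied from db.go.

// attributionQueries contrasts the two attribution query shapes (#1143).
func attributionQueries() (legacy, fixed string) {
	// Legacy, structurally unsound: any row whose decoded_json merely
	// CONTAINS the pubkey matches (holes 1, 2a, 2b above), and the LIKE
	// forces a full table scan.
	legacy = `SELECT hash FROM transmissions WHERE decoded_json LIKE '%' || ? || '%'`
	// Fixed: exact match on the dedicated, indexed column. NULL and ""
	// sentinel rows never equal a real pubkey, so they simply drop out.
	fixed = `SELECT hash FROM transmissions WHERE from_pubkey = ?`
	return
}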
@@ -0,0 +1,261 @@
|
||||
package main
|
||||
|
||||
// from_pubkey migration (#1143).
|
||||
//
|
||||
// Adds the `transmissions.from_pubkey` column + index, and provides an async
|
||||
// backfill that populates the column from `decoded_json` for ADVERT packets
|
||||
// whose `from_pubkey` is still NULL.
|
||||
//
|
||||
// Why a column at all: the legacy attribution path used
|
||||
// `WHERE decoded_json LIKE '%pubkey%'` (and `OR LIKE '%name%'`). This is
|
||||
// structurally unsound (adversarial spoofing + accidental hex-substring
|
||||
// false positives + full table scan). The column gives us exact match,
|
||||
// O(log n) lookups, and an explicit, auditable attribution surface.
|
||||
//
|
||||
// Backfill is run async (best-effort) so it cannot block server startup
|
||||
// even on prod-sized DBs (100K+ transmissions). Queries handle NULL
|
||||
// gracefully (return empty for that pubkey, same as today's behaviour
|
||||
// for unknown pubkeys).
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ensureFromPubkeyColumn adds the from_pubkey column + index to the
|
||||
// transmissions table if missing. Safe to call repeatedly.
|
||||
func ensureFromPubkeyColumn(dbPath string) error {
|
||||
rw, err := cachedRW(dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
has, err := tableHasColumn(rw, "transmissions", "from_pubkey")
|
||||
if err != nil {
|
||||
return fmt.Errorf("inspect transmissions: %w", err)
|
||||
}
|
||||
if !has {
|
||||
if _, err := rw.Exec("ALTER TABLE transmissions ADD COLUMN from_pubkey TEXT"); err != nil {
|
||||
return fmt.Errorf("add from_pubkey column: %w", err)
|
||||
}
|
||||
log.Println("[store] Added from_pubkey column to transmissions (#1143)")
|
||||
}
|
||||
|
||||
if _, err := rw.Exec("CREATE INDEX IF NOT EXISTS idx_transmissions_from_pubkey ON transmissions(from_pubkey)"); err != nil {
|
||||
return fmt.Errorf("create idx_transmissions_from_pubkey: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fromPubkeyBackfillProgress reports backfill state for /api/healthz.
|
||||
// All three values are read together via fromPubkeyBackfillSnapshot()
|
||||
// under a single RWMutex so /api/healthz never sees a torn snapshot
|
||||
// (e.g. done=true with processed<total). Updates use the Set/Mark
|
||||
// helpers which take the write lock.
|
||||
//
|
||||
// Cycle-3 m2c: previously these were independent atomic.{Int64,Bool};
|
||||
// healthz read each one separately and could observe an interleaved
|
||||
// write between Loads. The mutex-guarded snapshot fixes that.
|
||||
var (
|
||||
fromPubkeyBackfillMu sync.RWMutex
|
||||
fromPubkeyBackfillTotal int64
|
||||
fromPubkeyBackfillProcessed int64
|
||||
fromPubkeyBackfillDone bool
|
||||
)
|
||||
|
||||
// fromPubkeyBackfillSnapshot returns a consistent snapshot of all three
|
||||
// backfill progress fields under a single read lock.
|
||||
func fromPubkeyBackfillSnapshot() (total, processed int64, done bool) {
|
||||
fromPubkeyBackfillMu.RLock()
|
||||
defer fromPubkeyBackfillMu.RUnlock()
|
||||
return fromPubkeyBackfillTotal, fromPubkeyBackfillProcessed, fromPubkeyBackfillDone
|
||||
}
|
||||
|
||||
func fromPubkeyBackfillSetTotal(v int64) {
|
||||
fromPubkeyBackfillMu.Lock()
|
||||
fromPubkeyBackfillTotal = v
|
||||
fromPubkeyBackfillMu.Unlock()
|
||||
}
|
||||
|
||||
func fromPubkeyBackfillSetProcessed(v int64) {
|
||||
fromPubkeyBackfillMu.Lock()
|
||||
fromPubkeyBackfillProcessed = v
|
||||
fromPubkeyBackfillMu.Unlock()
|
||||
}
|
||||
|
||||
func fromPubkeyBackfillMarkDone() {
|
||||
fromPubkeyBackfillMu.Lock()
|
||||
fromPubkeyBackfillDone = true
|
||||
fromPubkeyBackfillMu.Unlock()
|
||||
}
|
||||
|
||||
// fromPubkeyBackfillReset zeroes all three fields under a single write
// lock. Used by tests; never called from production code.
|
||||
func fromPubkeyBackfillReset() {
|
||||
fromPubkeyBackfillMu.Lock()
|
||||
fromPubkeyBackfillTotal = 0
|
||||
fromPubkeyBackfillProcessed = 0
|
||||
fromPubkeyBackfillDone = false
|
||||
fromPubkeyBackfillMu.Unlock()
|
||||
}
|
||||
|
||||
// startFromPubkeyBackfill is the production entry point used by main.go to
|
||||
// launch the backfill so it cannot block startup. It MUST dispatch the
|
||||
// backfill in a goroutine; the dispatch path is gated by
|
||||
// TestBackfillFromPubkey_DoesNotBlockBoot — if the `go` keyword below is ever
|
||||
// removed, that test fails because dispatch becomes synchronous and exceeds
|
||||
// the 50ms boot budget.
|
||||
func startFromPubkeyBackfill(dbPath string, chunkSize int, yieldDuration time.Duration) {
|
||||
// MUST stay `go` — TestBackfillFromPubkey_DoesNotBlockBoot fails if
|
||||
// this becomes synchronous (boot dispatch budget exceeds 50ms).
|
||||
go backfillFromPubkeyAsync(dbPath, chunkSize, yieldDuration)
|
||||
}
|
||||
|
||||
// backfillFromPubkeyAsync scans transmissions where from_pubkey IS NULL and
|
||||
// populates from_pubkey by parsing decoded_json. Runs in chunks with a
|
||||
// short yield between chunks so it can't starve other writers.
|
||||
//
|
||||
// Strategy:
|
||||
// - ADVERT (payload_type = 4) -> decoded_json.pubKey
|
||||
// - other types -> leave NULL (queries handle NULL gracefully)
|
||||
//
|
||||
// chunkSize and yieldDuration are tunable for tests.
|
||||
func backfillFromPubkeyAsync(dbPath string, chunkSize int, yieldDuration time.Duration) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("[store] backfillFromPubkeyAsync panic recovered: %v", r)
|
||||
}
|
||||
fromPubkeyBackfillMarkDone()
|
||||
}()
|
||||
|
||||
if chunkSize <= 0 {
|
||||
chunkSize = 5000
|
||||
}
|
||||
|
||||
rw, err := cachedRW(dbPath)
|
||||
if err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: open rw error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var total int64
|
||||
if err := rw.QueryRow(
|
||||
"SELECT COUNT(*) FROM transmissions WHERE from_pubkey IS NULL AND payload_type = 4",
|
||||
).Scan(&total); err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: count error: %v", err)
|
||||
return
|
||||
}
|
||||
fromPubkeyBackfillSetTotal(total)
|
||||
if total == 0 {
|
||||
log.Println("[store] from_pubkey backfill: nothing to do")
|
||||
return
|
||||
}
|
||||
log.Printf("[store] from_pubkey backfill starting: %d ADVERT rows", total)
|
||||
|
||||
updateStmt, err := rw.Prepare("UPDATE transmissions SET from_pubkey = ? WHERE id = ?")
|
||||
if err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: prepare update: %v", err)
|
||||
return
|
||||
}
|
||||
defer updateStmt.Close()
|
||||
|
||||
var processed int64
|
||||
for {
|
||||
rows, err := rw.Query(
|
||||
"SELECT id, decoded_json FROM transmissions WHERE from_pubkey IS NULL AND payload_type = 4 LIMIT ?",
|
||||
chunkSize)
|
||||
if err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: select error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
type row struct {
|
||||
id int64
|
||||
pk string
|
||||
}
|
||||
batch := make([]row, 0, chunkSize)
|
||||
for rows.Next() {
|
||||
var id int64
|
||||
var dj sql.NullString
|
||||
if err := rows.Scan(&id, &dj); err != nil {
|
||||
continue
|
||||
}
|
||||
pk := extractPubkeyFromAdvertJSON(dj.String)
|
||||
batch = append(batch, row{id: id, pk: pk})
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
if len(batch) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Apply updates in a single tx for throughput.
|
||||
tx, err := rw.Begin()
|
||||
if err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: begin tx: %v", err)
|
||||
return
|
||||
}
|
||||
txStmt := tx.Stmt(updateStmt)
|
||||
for _, b := range batch {
|
||||
// Sentinel convention for transmissions.from_pubkey (#1143, m5):
|
||||
// NULL — row has not yet been scanned by this backfill.
|
||||
// "" — scanned, no extractable pubkey (malformed/legacy ADVERT
|
||||
// decoded_json, or a JSON shape we don't understand).
|
||||
// hex — scanned, pubkey successfully extracted.
|
||||
//
|
||||
// The "" sentinel exists ONLY in this backfill path: it's how we
|
||||
// avoid the #1119 infinite-rescan loop (the WHERE clause is
|
||||
// `from_pubkey IS NULL`, so once we mark a row "" it never matches
|
||||
// again). The ingest write path (cmd/ingestor/db.go ~1289) leaves
|
||||
// from_pubkey NULL when PubKey is empty; the two states are
|
||||
// semantically equivalent ("we have no pubkey for this row") and
|
||||
// all attribution call sites query `from_pubkey = ?` with a real
|
||||
// pubkey, so neither NULL nor "" matches — no UX divergence.
|
||||
// b.pk already carries the sentinel: "" when nothing was extractable,
// the hex pubkey otherwise. Write it through as-is.
if _, err := txStmt.Exec(b.pk, b.id); err != nil {
|
||||
// non-fatal; log first failure per chunk and keep going
|
||||
log.Printf("[store] from_pubkey backfill: update id=%d: %v", b.id, err)
|
||||
}
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
log.Printf("[store] from_pubkey backfill: commit: %v", err)
|
||||
return
|
||||
}
|
||||
processed += int64(len(batch))
|
||||
fromPubkeyBackfillSetProcessed(processed)
|
||||
|
||||
if len(batch) < chunkSize {
|
||||
break
|
||||
}
|
||||
if yieldDuration > 0 {
|
||||
time.Sleep(yieldDuration)
|
||||
}
|
||||
}
|
||||
log.Printf("[store] from_pubkey backfill complete: %d rows processed", processed)
|
||||
}
|
||||
|
||||
// extractPubkeyFromAdvertJSON parses an ADVERT decoded_json blob and returns
|
||||
// the pubKey field, or "" if absent/invalid. Lenient: any parse error yields
|
||||
// the empty string rather than a panic.
|
||||
func extractPubkeyFromAdvertJSON(s string) string {
|
||||
if s == "" {
|
||||
return ""
|
||||
}
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(s), &m); err != nil {
|
||||
return ""
|
||||
}
|
||||
if v, ok := m["pubKey"].(string); ok {
|
||||
return v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
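ensureFromPubkeyColumn above calls tableHasColumn, which this compare view doesn't include. A plausible sketch using SQLite's pragma_table_info table-valued function follows; the name and signature are assumed from the call site.

// tableHasColumnSketch reports whether table has the named column by
// scanning pragma_table_info, the standard SQLite column probe.
func tableHasColumnSketch(db *sql.DB, table, column string) (bool, error) {
	rows, err := db.Query(`SELECT name FROM pragma_table_info(?)`, table)
	if err != nil {
		return false, err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return false, err
		}
		if name == column {
			return true, nil
		}
	}
	return false, rows.Err()
}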
@@ -18,6 +18,14 @@ require github.com/meshcore-analyzer/packetpath v0.0.0
|
||||
|
||||
replace github.com/meshcore-analyzer/packetpath => ../../internal/packetpath
|
||||
|
||||
require github.com/meshcore-analyzer/dbconfig v0.0.0
|
||||
|
||||
replace github.com/meshcore-analyzer/dbconfig => ../../internal/dbconfig
|
||||
|
||||
require github.com/meshcore-analyzer/perfio v0.0.0
|
||||
|
||||
replace github.com/meshcore-analyzer/perfio => ../../internal/perfio
|
||||
|
||||
require (
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
|
||||
@@ -34,10 +34,22 @@ func (s *Server) handleHealthz(w http.ResponseWriter, r *http.Request) {
|
||||
s.store.mu.RUnlock()
|
||||
}
|
||||
|
||||
// #1143 (M2): expose from_pubkey backfill progress so operators can
|
||||
// see whether the legacy ADVERT backfill is still running. NULL rows
|
||||
// produce empty attribution results during the in-flight window.
|
||||
// Cycle-3 m2c: snapshot all three fields under a single read lock so
|
||||
// /api/healthz never observes a torn state (e.g. done=true with
|
||||
// processed<total).
|
||||
bfTotal, bfProcessed, bfDone := fromPubkeyBackfillSnapshot()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"ready": true,
|
||||
"loadedTx": loadedTx,
|
||||
"loadedObs": loadedObs,
|
||||
"from_pubkey_backfill": map[string]interface{}{
|
||||
"total": bfTotal,
|
||||
"processed": bfProcessed,
|
||||
"done": bfDone,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
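For reference, a healthz body from the handler above while a backfill is in flight would look roughly like this (counts illustrative):

{
  "ready": true,
  "loadedTx": 48211,
  "loadedObs": 12,
  "from_pubkey_backfill": {"total": 50000, "processed": 12000, "done": false}
}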
@@ -2,9 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestHealthzNotReady(t *testing.T) {
|
||||
@@ -78,3 +81,151 @@ func TestHealthzAntiTautology(t *testing.T) {
|
||||
t.Fatal("anti-tautology: handler returned 200 when readiness=0; gating is broken")
|
||||
}
|
||||
}
|
||||
|
||||
// TestHealthzExposesFromPubkeyBackfill verifies the from_pubkey backfill
|
||||
// progress (#1143, M2) is observable via /api/healthz. The progress
// counters are updated by backfillFromPubkeyAsync; without exposure here
// they were dead code. Asserts the response includes a from_pubkey_backfill
// object with
|
||||
// total/processed/done fields.
|
||||
func TestHealthzExposesFromPubkeyBackfill(t *testing.T) {
|
||||
readiness.Store(1)
|
||||
defer readiness.Store(0)
|
||||
|
||||
// Set known values so we can assert wiring (not just presence).
|
||||
fromPubkeyBackfillReset()
|
||||
fromPubkeyBackfillSetTotal(7)
|
||||
fromPubkeyBackfillSetProcessed(3)
|
||||
defer fromPubkeyBackfillReset()
|
||||
|
||||
srv := &Server{store: &PacketStore{}}
|
||||
req := httptest.NewRequest("GET", "/api/healthz", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.handleHealthz(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", w.Code)
|
||||
}
|
||||
var resp map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("invalid JSON: %v", err)
|
||||
}
|
||||
bf, ok := resp["from_pubkey_backfill"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatalf("missing from_pubkey_backfill object in healthz response: %v", resp)
|
||||
}
|
||||
if got, want := bf["total"], float64(7); got != want {
|
||||
t.Errorf("from_pubkey_backfill.total = %v, want %v", got, want)
|
||||
}
|
||||
if got, want := bf["processed"], float64(3); got != want {
|
||||
t.Errorf("from_pubkey_backfill.processed = %v, want %v", got, want)
|
||||
}
|
||||
if got, want := bf["done"], false; got != want {
|
||||
t.Errorf("from_pubkey_backfill.done = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHealthzFromPubkeyBackfillConsistentSnapshot exercises cycle-3 m2c:
|
||||
// the handler used to read three independent atomics (Total/Processed/Done)
|
||||
// in sequence, so a backfill update interleaved between reads could yield
|
||||
// an inconsistent snapshot (e.g. done=true with processed<total, or
|
||||
// processed>total when total is updated last). This test races concurrent
|
||||
// progress updates against many healthz reads and asserts every snapshot
|
||||
// satisfies the invariants:
|
||||
//
|
||||
// processed <= total
|
||||
// if done: processed == total (or both 0 — nothing to do)
|
||||
//
|
||||
// With the pre-fix code (separate atomic.Load calls), this fires within
|
||||
// a few hundred iterations on a multi-core box. With the RWMutex-guarded
|
||||
// snapshot, it never fires.
|
||||
func TestHealthzFromPubkeyBackfillConsistentSnapshot(t *testing.T) {
|
||||
readiness.Store(1)
|
||||
defer readiness.Store(0)
|
||||
defer fromPubkeyBackfillReset()
|
||||
|
||||
srv := &Server{store: &PacketStore{}}
|
||||
|
||||
stop := make(chan struct{})
|
||||
var writerWg sync.WaitGroup
|
||||
var readerWg sync.WaitGroup
|
||||
|
||||
// Writer: simulates the backfill loop — sets total, then increments
|
||||
// processed in lock-step, occasionally finishing (done=true with
|
||||
// processed==total). Each "tick" mutates all three values.
|
||||
writerWg.Add(1)
|
||||
go func() {
|
||||
defer writerWg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
fromPubkeyBackfillSetTotal(100)
|
||||
for p := int64(0); p <= 100; p++ {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
fromPubkeyBackfillSetProcessed(p)
|
||||
}
|
||||
fromPubkeyBackfillMarkDone()
|
||||
fromPubkeyBackfillReset()
|
||||
}
|
||||
}()
|
||||
|
||||
// Readers: hammer healthz, assert invariants on each response.
|
||||
const readers = 8
|
||||
const reads = 200
|
||||
errs := make(chan string, readers*reads)
|
||||
for i := 0; i < readers; i++ {
|
||||
readerWg.Add(1)
|
||||
go func() {
|
||||
defer readerWg.Done()
|
||||
for j := 0; j < reads; j++ {
|
||||
req := httptest.NewRequest("GET", "/api/healthz", nil)
|
||||
w := httptest.NewRecorder()
|
||||
srv.handleHealthz(w, req)
|
||||
var resp map[string]interface{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
errs <- "invalid JSON: " + err.Error()
|
||||
return
|
||||
}
|
||||
bf, _ := resp["from_pubkey_backfill"].(map[string]interface{})
|
||||
total, _ := bf["total"].(float64)
|
||||
processed, _ := bf["processed"].(float64)
|
||||
done, _ := bf["done"].(bool)
|
||||
if processed > total {
|
||||
errs <- "processed>total snapshot: processed=" + ftoa(processed) + " total=" + ftoa(total)
|
||||
return
|
||||
}
|
||||
if done && processed != total {
|
||||
errs <- "done=true but processed!=total: processed=" + ftoa(processed) + " total=" + ftoa(total)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait for readers to complete (bounded by 'reads' iterations), then
|
||||
// stop the writer and drain.
|
||||
readerDone := make(chan struct{})
|
||||
go func() { readerWg.Wait(); close(readerDone) }()
|
||||
select {
|
||||
case <-readerDone:
|
||||
case <-time.After(5 * time.Second):
|
||||
close(stop)
|
||||
writerWg.Wait()
|
||||
t.Fatal("timed out waiting for reader goroutines")
|
||||
}
|
||||
close(stop)
|
||||
writerWg.Wait()
|
||||
|
||||
close(errs)
|
||||
for e := range errs {
|
||||
t.Errorf("inconsistent snapshot: %s", e)
|
||||
}
|
||||
}
|
||||
|
||||
func ftoa(f float64) string { return fmt.Sprintf("%g", f) }
|
||||
|
||||
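For readers who want the m2c hazard the snapshot test above hunts, in isolation: with three independent atomics each Load is atomic on its own, but the triple is not. A reconstructed, hypothetical pre-fix read path:

import "sync/atomic"

var (
	totalAtomic     atomic.Int64
	processedAtomic atomic.Int64
	doneAtomic      atomic.Bool
)

// tornReadSketch mirrors the pre-fix healthz read: three separate loads.
// If the writer runs SetProcessed(total) and MarkDone between the second
// and third load, the caller sees done=true paired with a stale
// processed < total, exactly the anomaly the RWMutex snapshot rules out.
func tornReadSketch() (total, processed int64, done bool) {
	total = totalAtomic.Load()
	processed = processedAtomic.Load() // writer can finish right here
	done = doneAtomic.Load()
	return
}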
@@ -0,0 +1,147 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestIssue804_AnalyticsAttributesByRepeaterRegion verifies that analytics
|
||||
// (specifically GetAnalyticsHashSizes) attribute multi-byte nodes to the
|
||||
// REPEATER's home region, not the observer that happened to hear the relay.
|
||||
//
|
||||
// Scenario from #804:
|
||||
// - PDX-Repeater is a multi-byte (hashSize=2) repeater whose ZERO-HOP direct
|
||||
// adverts are only heard by obs-PDX (a PDX observer). That zero-hop direct
|
||||
// advert is the most reliable home-region signal — it cannot have been
|
||||
// relayed.
|
||||
// - A flood advert from PDX-Repeater (hashSize=2) propagates and is heard by
|
||||
// obs-SJC (a SJC observer) via a multi-hop relay path.
|
||||
// - When the user asks for region=SJC analytics, the PDX-Repeater MUST NOT
|
||||
// pollute SJC's multiByteNodes — it lives in PDX.
|
||||
// - The result should also expose attributionMethod="repeater" so the API
|
||||
// consumer knows which method was used.
|
||||
//
|
||||
// Pre-fix behavior: PDX-Repeater appears in SJC's multiByteNodes because the
|
||||
// filter is observer-based. This test fails on the pre-fix code at the
|
||||
// "want PDX-Repeater EXCLUDED" assertion.
|
||||
func TestIssue804_AnalyticsAttributesByRepeaterRegion(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
recentEpoch := now.Add(-1 * time.Hour).Unix()
|
||||
|
||||
// Observers: one in PDX, one in SJC
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
|
||||
VALUES ('obs-pdx', 'Obs PDX', 'PDX', ?, '2026-01-01T00:00:00Z', 100)`, recent)
|
||||
db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
|
||||
VALUES ('obs-sjc', 'Obs SJC', 'SJC', ?, '2026-01-01T00:00:00Z', 100)`, recent)
|
||||
|
||||
// PDX-Repeater node (lives in Portland)
|
||||
pdxPK := "pdx0000000000001"
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role)
|
||||
VALUES (?, 'PDX-Repeater', 'repeater')`, pdxPK)
|
||||
|
||||
// SJC-Repeater node (lives in San Jose) — sanity baseline
|
||||
sjcPK := "sjc0000000000001"
|
||||
db.conn.Exec(`INSERT INTO nodes (public_key, name, role)
|
||||
VALUES (?, 'SJC-Repeater', 'repeater')`, sjcPK)
|
||||
|
||||
pdxDecoded := `{"pubKey":"` + pdxPK + `","name":"PDX-Repeater","type":"ADVERT","flags":{"isRepeater":true}}`
|
||||
sjcDecoded := `{"pubKey":"` + sjcPK + `","name":"SJC-Repeater","type":"ADVERT","flags":{"isRepeater":true}}`
|
||||
|
||||
// 1) PDX-Repeater zero-hop DIRECT advert heard only by obs-PDX.
|
||||
// Establishes PDX as the repeater's home region.
|
||||
// raw_hex header 0x12 = route_type 2 (direct), payload_type 4
|
||||
// pathByte 0x40 (hashSize bits=01 → 2, hop_count=0)
|
||||
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1240aabbccdd', 'pdx_zh_direct', ?, 2, 4, ?)`, recent, pdxDecoded)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (1, 1, 12.0, -85, '[]', ?)`, recentEpoch)

	// 2) PDX-Repeater FLOOD advert with hashSize=2 (reliable).
	// Heard ONLY by obs-SJC via a relay path (this is the polluting case).
	// raw_hex header 0x11 = route_type 1 (flood), payload_type 4
	// pathByte 0x41 (hashSize bits=01 → 2, hop_count=1)
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1141aabbccdd', 'pdx_flood', ?, 1, 4, ?)`, recent, pdxDecoded)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (2, 2, 8.0, -95, '["aa11"]', ?)`, recentEpoch)

	// 3) SJC-Repeater zero-hop DIRECT advert heard only by obs-SJC.
	// Establishes SJC as the repeater's home region.
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1240ccddeeff', 'sjc_zh_direct', ?, 2, 4, ?)`, recent, sjcDecoded)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (3, 2, 14.0, -82, '[]', ?)`, recentEpoch)

	// 4) SJC-Repeater FLOOD advert with hashSize=2, heard by obs-SJC.
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1141ccddeeff', 'sjc_flood', ?, 1, 4, ?)`, recent, sjcDecoded)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (4, 2, 11.0, -88, '["cc22"]', ?)`, recentEpoch)

	store := NewPacketStore(db, nil)
	store.Load()

	t.Run("region=SJC excludes PDX-Repeater (heard but not home)", func(t *testing.T) {
		result := store.GetAnalyticsHashSizes("SJC")

		mb, ok := result["multiByteNodes"].([]map[string]interface{})
		if !ok {
			t.Fatal("expected multiByteNodes slice")
		}

		var foundPDX, foundSJC bool
		for _, n := range mb {
			pk, _ := n["pubkey"].(string)
			if pk == pdxPK {
				foundPDX = true
			}
			if pk == sjcPK {
				foundSJC = true
			}
		}

		if foundPDX {
			t.Errorf("PDX-Repeater leaked into SJC analytics — region attribution still observer-based (#804 not fixed)")
		}
		if !foundSJC {
			t.Errorf("SJC-Repeater missing from SJC analytics — fix over-filtered")
		}
	})

	t.Run("API exposes attributionMethod", func(t *testing.T) {
		result := store.GetAnalyticsHashSizes("SJC")
		method, ok := result["attributionMethod"].(string)
		if !ok {
			t.Fatal("expected attributionMethod string field on result")
		}
		if method != "repeater" {
			t.Errorf("attributionMethod = %q, want %q", method, "repeater")
		}
	})

	t.Run("region=PDX excludes SJC-Repeater", func(t *testing.T) {
		result := store.GetAnalyticsHashSizes("PDX")
		mb, _ := result["multiByteNodes"].([]map[string]interface{})

		var foundPDX, foundSJC bool
		for _, n := range mb {
			pk, _ := n["pubkey"].(string)
			if pk == pdxPK {
				foundPDX = true
			}
			if pk == sjcPK {
				foundSJC = true
			}
		}
		if !foundPDX {
			t.Errorf("PDX-Repeater missing from PDX analytics")
		}
		if foundSJC {
			t.Errorf("SJC-Repeater leaked into PDX analytics")
		}
	})
}
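The fixtures above lean on the advert path-byte layout without spelling it out in one place. The following is an illustrative decoder based only on what the comments document (0x40 → hashSize 2 / 0 hops, 0x41 → hashSize 2 / 1 hop); decodePathByte is a hypothetical helper, and the one-byte baseline plus the six-bit hop mask are assumptions, not the analyzer's shipped parser.

// decodePathByte is a sketch, not analyzer code: it reads the path byte the
// way the fixture comments above describe it. Only the cases these fixtures
// exercise are known (top bits 01 → hash size 2, low bits → hop count).
func decodePathByte(b byte) (hashSize, hopCount int) {
	switch b >> 6 { // top two bits select the hash size
	case 0b01:
		hashSize = 2 // matches 0x40/0x41 in the fixtures above
	default:
		hashSize = 1 // assumed baseline; not confirmed by these fixtures
	}
	hopCount = int(b & 0x3F) // remaining bits carry the hop count (mask width assumed)
	return
}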
@@ -0,0 +1,63 @@
package main

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/gorilla/mux"
)

// TestIssue871_NoNullHashOrTimestamp verifies that /api/packets never returns
// packets with null/empty hash or null timestamp (issue #871).
func TestIssue871_NoNullHashOrTimestamp(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	// Insert bad legacy data: packet with empty hash
	now := time.Now().UTC().Add(-30 * time.Minute).Format(time.RFC3339)
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('DEAD', '', ?, 1, 4, '{}')`, now)
	// Insert bad legacy data: packet with NULL first_seen (timestamp)
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('BEEF', 'aa11bb22cc33dd44', NULL, 1, 4, '{}')`)

	cfg := &Config{Port: 3000}
	hub := NewHub()
	srv := NewServer(db, cfg, hub)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load failed: %v", err)
	}
	srv.store = store
	router := mux.NewRouter()
	srv.RegisterRoutes(router)

	req := httptest.NewRequest(http.MethodGet, "/api/packets?limit=200", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}

	var resp struct {
		Packets []map[string]interface{} `json:"packets"`
	}
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("decode error: %v", err)
	}

	for i, p := range resp.Packets {
		hash, _ := p["hash"]
		ts, _ := p["timestamp"]
		if hash == nil || hash == "" {
			t.Errorf("packet[%d] has null/empty hash: %v", i, p)
		}
		if ts == nil || ts == "" {
			t.Errorf("packet[%d] has null/empty timestamp: %v", i, p)
		}
	}
}
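The test pins the contract; the fix itself is not in this hunk. One plausible shape is filtering the bad legacy rows out at query time. This is a sketch under that assumption: the column names come from the inserts above, everything else is illustrative.

// Sketch only, not the actual patch: exclude legacy rows with an empty hash
// or NULL first_seen when loading packets, so /api/packets cannot surface them.
rows, err := db.conn.Query(`
	SELECT raw_hex, hash, first_seen, route_type, payload_type, decoded_json
	FROM transmissions
	WHERE hash IS NOT NULL AND hash != ''
	  AND first_seen IS NOT NULL`)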
@@ -108,6 +108,25 @@ func main() {
		log.Printf("[security] WARNING: API key is weak or a known default — write endpoints are vulnerable")
	}

	// Apply Go runtime soft memory limit (#836).
	// Honors GOMEMLIMIT if set; otherwise derives from packetStore.maxMemoryMB.
	{
		_, envSet := os.LookupEnv("GOMEMLIMIT")
		maxMB := 0
		if cfg.PacketStore != nil {
			maxMB = cfg.PacketStore.MaxMemoryMB
		}
		limit, source := applyMemoryLimit(maxMB, envSet)
		switch source {
		case "env":
			log.Printf("[memlimit] using GOMEMLIMIT from environment (%s)", os.Getenv("GOMEMLIMIT"))
		case "derived":
			log.Printf("[memlimit] derived from packetStore.maxMemoryMB=%d → %d MiB (1.5x headroom)", maxMB, limit/(1024*1024))
		default:
			log.Printf("[memlimit] no soft memory limit set (GOMEMLIMIT unset, packetStore.maxMemoryMB=0); recommend setting one to avoid container OOM-kill")
		}
	}

	// Resolve DB path
	resolvedDB := cfg.ResolveDBPath(configDir)
	log.Printf("[config] port=%d db=%s public=%s", cfg.Port, resolvedDB, publicDir)
@@ -180,6 +199,26 @@ func main() {
		log.Printf("[store] warning: could not add observers.inactive column: %v", err)
	}

	// Ensure observers.last_packet_at column exists (PR #905 reads it; ingestor migration
	// adds it but server may run against DBs ingestor never touched, e.g. e2e fixture).
	if err := ensureLastPacketAtColumn(dbPath); err != nil {
		log.Printf("[store] warning: could not add observers.last_packet_at column: %v", err)
	}

	// Ensure nodes.foreign_advert column exists (#730 reads it on every /api/nodes
	// scan; ingestor migration foreign_advert_v1 adds it but server may run against
	// DBs ingestor never touched, e.g. e2e fixture).
	if err := ensureForeignAdvertColumn(dbPath); err != nil {
		log.Printf("[store] warning: could not add nodes.foreign_advert column: %v", err)
	}

	// Ensure transmissions.from_pubkey column + index exists (#1143). Backfill
	// for legacy NULL rows runs async after HTTP starts so it can't block boot
	// even on prod-sized DBs (100K+ transmissions).
	if err := ensureFromPubkeyColumn(dbPath); err != nil {
		log.Printf("[store] warning: could not add transmissions.from_pubkey column: %v", err)
	}

	// Soft-delete observers that are in the blacklist (mark inactive=1) so
	// historical data from a prior unblocked window is hidden too.
	if len(cfg.ObserverBlacklist) > 0 {
@@ -204,10 +243,9 @@ func main() {
				log.Printf("[neighbor] graph build panic recovered: %v", r)
			}
		}()
-		rw, rwErr := openRW(dbPath)
+		rw, rwErr := cachedRW(dbPath)
		if rwErr == nil {
			edgeCount := buildAndPersistEdges(store, rw)
-			rw.Close()
			log.Printf("[neighbor] persisted %d edges", edgeCount)
		}
		built := BuildFromStore(store)
@@ -498,6 +536,11 @@ func main() {

	// Start async backfill in background — HTTP is now available.
	go backfillResolvedPathsAsync(store, dbPath, 5000, 100*time.Millisecond, cfg.BackfillHours())
	// #1143: backfill from_pubkey for legacy ADVERT rows. Async so even
	// 100K+ rows can't block boot; queries handle NULL gracefully.
	// startFromPubkeyBackfill wraps the goroutine dispatch so the async
	// contract is testable (see TestBackfillFromPubkey_DoesNotBlockBoot).
	startFromPubkeyBackfill(dbPath, 5000, 100*time.Millisecond)

	// Migrate old content hashes in background (one-time, idempotent).
	go migrateContentHashesAsync(store, 5000, 100*time.Millisecond)
@@ -0,0 +1,32 @@
package main

import (
	"runtime/debug"
)

// applyMemoryLimit configures Go's soft memory limit (GOMEMLIMIT).
//
// Behavior:
//   - If envSet is true (GOMEMLIMIT env var present), the runtime has already
//     parsed it; we leave it alone and report source="env" with limit=0.
//   - Otherwise, if maxMemoryMB > 0, we derive a limit of maxMemoryMB * 1.5 MiB
//     and set it via debug.SetMemoryLimit. This forces aggressive GC under
//     cgroup pressure so the process self-throttles before SIGKILL. See #836.
//   - Otherwise, no limit is applied; source="none".
//
// Returns the limit (in bytes) we actually set, or 0 if we did not set one,
// plus a short source identifier ("env" | "derived" | "none") for logging.
func applyMemoryLimit(maxMemoryMB int, envSet bool) (int64, string) {
	if envSet {
		return 0, "env"
	}
	if maxMemoryMB <= 0 {
		return 0, "none"
	}
	// 1.5x headroom over the steady-state packet store budget covers
	// transient peaks (cold-load row-scan / decode pipeline, Go's NextGC
	// trigger at ~2x live heap). See issue #836 heap profile.
	limit := int64(maxMemoryMB) * 1024 * 1024 * 3 / 2
	debug.SetMemoryLimit(limit)
	return limit, "derived"
}
@@ -0,0 +1,54 @@
package main

import (
	"runtime/debug"
	"testing"
)

func TestApplyMemoryLimit_FromEnv(t *testing.T) {
	t.Setenv("GOMEMLIMIT", "850MiB")
	// restore the default (no limit) after the test; SetMemoryLimit(-1)
	// only reads the current value and would not reset anything
	defer debug.SetMemoryLimit(int64(1<<63 - 1))

	limit, source := applyMemoryLimit(512, true /* envSet */)
	if source != "env" {
		t.Fatalf("expected source=env, got %q", source)
	}
	// When env is set, our function must NOT override it; reported limit is 0.
	if limit != 0 {
		t.Fatalf("expected limit=0 (not set by us), got %d", limit)
	}
}

func TestApplyMemoryLimit_DerivedFromMaxMemoryMB(t *testing.T) {
	defer debug.SetMemoryLimit(int64(1<<63 - 1)) // restore default (no limit)

	// maxMemoryMB=512 → 512 * 1.5 = 768 MiB = 768 * 1024 * 1024 bytes
	limit, source := applyMemoryLimit(512, false /* envSet */)
	if source != "derived" {
		t.Fatalf("expected source=derived, got %q", source)
	}
	want := int64(768) * 1024 * 1024
	if limit != want {
		t.Fatalf("expected limit=%d, got %d", want, limit)
	}
	// Verify it was actually set on the runtime
	cur := debug.SetMemoryLimit(-1)
	if cur != want {
		t.Fatalf("runtime memory limit not set: want=%d got=%d", want, cur)
	}
}

func TestApplyMemoryLimit_None(t *testing.T) {
	defer debug.SetMemoryLimit(int64(1<<63 - 1)) // restore default (no limit)
	// Reset to "no limit" (math.MaxInt64) before test
	debug.SetMemoryLimit(int64(1<<63 - 1))

	limit, source := applyMemoryLimit(0, false)
	if source != "none" {
		t.Fatalf("expected source=none, got %q", source)
	}
	if limit != 0 {
		t.Fatalf("expected limit=0, got %d", limit)
	}
}
@@ -0,0 +1,57 @@
package main

import "testing"

func TestEnrichNodeWithMultiByte(t *testing.T) {
	t.Run("nil entry leaves no fields", func(t *testing.T) {
		node := map[string]interface{}{"public_key": "abc123"}
		EnrichNodeWithMultiByte(node, nil)
		if _, ok := node["multi_byte_status"]; ok {
			t.Error("expected no multi_byte_status with nil entry")
		}
	})

	t.Run("confirmed entry sets fields", func(t *testing.T) {
		node := map[string]interface{}{"public_key": "abc123"}
		entry := &MultiByteCapEntry{
			Status:      "confirmed",
			Evidence:    "advert",
			MaxHashSize: 2,
		}
		EnrichNodeWithMultiByte(node, entry)
		if node["multi_byte_status"] != "confirmed" {
			t.Errorf("expected confirmed, got %v", node["multi_byte_status"])
		}
		if node["multi_byte_evidence"] != "advert" {
			t.Errorf("expected advert, got %v", node["multi_byte_evidence"])
		}
		if node["multi_byte_max_hash_size"] != 2 {
			t.Errorf("expected 2, got %v", node["multi_byte_max_hash_size"])
		}
	})

	t.Run("suspected entry sets fields", func(t *testing.T) {
		node := map[string]interface{}{"public_key": "abc123"}
		entry := &MultiByteCapEntry{
			Status:      "suspected",
			Evidence:    "path",
			MaxHashSize: 2,
		}
		EnrichNodeWithMultiByte(node, entry)
		if node["multi_byte_status"] != "suspected" {
			t.Errorf("expected suspected, got %v", node["multi_byte_status"])
		}
	})

	t.Run("unknown entry sets status unknown", func(t *testing.T) {
		node := map[string]interface{}{"public_key": "abc123"}
		entry := &MultiByteCapEntry{
			Status:      "unknown",
			MaxHashSize: 1,
		}
		EnrichNodeWithMultiByte(node, entry)
		if node["multi_byte_status"] != "unknown" {
			t.Errorf("expected unknown, got %v", node["multi_byte_status"])
		}
	})
}
@@ -0,0 +1,107 @@
package main

import (
	"testing"
	"time"
)

// TestMultiByteCapability_RegionFiltered_PreservesConfirmedStatus verifies
// that GetAnalyticsHashSizes returns a populated multiByteCapability list
// even when a region filter is applied. The frontend (analytics.js) merges
// this into the adopter table to render per-node "confirmed/suspected/unknown"
// badges. When the field is missing or empty under a region filter, every
// row falls back to "unknown" — see meshcore.meshat.se/#/analytics filtered
// by JKG showing 14 "unknown" while the unfiltered view shows 0.
//
// Multi-byte capability is a property of the NODE (advertised hash_size from
// its own adverts), not the observing region. Region filter should affect
// which nodes appear in the result list (multiByteNodes), not their cap status.
//
// Pre-fix behavior: multiByteCapability is only populated when region == "".
// This test fails because result["multiByteCapability"] is absent under
// region="JKG", so the lookup returns nil/false.
func TestMultiByteCapability_RegionFiltered_PreservesConfirmedStatus(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()

	now := time.Now().UTC()
	recent := now.Add(-1 * time.Hour).Format(time.RFC3339)
	recentEpoch := now.Add(-1 * time.Hour).Unix()

	// Two observers in different regions.
	db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
		VALUES ('obs-sjc', 'Obs SJC', 'SJC', ?, '2026-01-01T00:00:00Z', 100)`, recent)
	db.conn.Exec(`INSERT INTO observers (id, name, iata, last_seen, first_seen, packet_count)
		VALUES ('obs-jkg', 'Obs JKG', 'JKG', ?, '2026-01-01T00:00:00Z', 100)`, recent)

	// Node A: a JKG-region repeater that advertises multi-byte (hash_size=2).
	// Its zero-hop direct advert is only heard by obs-SJC (e.g. an out-of-region
	// listener that happens to pick it up). Under the JKG region filter, the
	// computeAnalyticsHashSizes() pass will see a smaller advert dataset, but
	// the node's multi-byte capability is intrinsic and should still resolve
	// to "confirmed" via the global advert evidence.
	pkA := "aaa0000000000001"
	db.conn.Exec(`INSERT INTO nodes (public_key, name, role)
		VALUES (?, 'Node-A', 'repeater')`, pkA)

	decodedA := `{"pubKey":"` + pkA + `","name":"Node-A","type":"ADVERT","flags":{"isRepeater":true}}`

	// Zero-hop direct advert (route_type=2, payload_type=4),
	// pathByte 0x40 → hash_size bits 01 → 2 bytes.
	// Heard by obs-SJC ONLY.
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1240aabbccdd', 'a_zh_direct', ?, 2, 4, ?)`, recent, decodedA)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (1, 1, 12.0, -85, '[]', ?)`, recentEpoch)

	// Node A also appears as a path hop in a JKG-observed packet, so it
	// shows up in the JKG region's node list.
	// route_type=1 (flood), payload_type=4, pathByte 0x41 (hs=2, hops=1)
	db.conn.Exec(`INSERT INTO transmissions (raw_hex, hash, first_seen, route_type, payload_type, decoded_json)
		VALUES ('1141aabbccdd', 'a_jkg_relay', ?, 1, 4, ?)`, recent, decodedA)
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (2, 2, 8.0, -95, '["aa"]', ?)`, recentEpoch)

	store := NewPacketStore(db, nil)
	store.Load()

	// Sanity: unfiltered view exposes the field.
	unfiltered := store.GetAnalyticsHashSizes("")
	if _, ok := unfiltered["multiByteCapability"]; !ok {
		t.Fatal("unfiltered result missing multiByteCapability — test setup is wrong")
	}

	// The actual assertion: region-filtered view MUST also expose the field
	// AND must report Node A as "confirmed", not "unknown".
	result := store.GetAnalyticsHashSizes("JKG")
	capsRaw, ok := result["multiByteCapability"]
	if !ok {
		t.Fatalf("expected multiByteCapability in region=JKG result, got keys: %v", keysOf(result))
	}
	caps, ok := capsRaw.([]MultiByteCapEntry)
	if !ok {
		t.Fatalf("expected []MultiByteCapEntry, got %T", capsRaw)
	}

	var foundA *MultiByteCapEntry
	for i := range caps {
		if caps[i].PublicKey == pkA {
			foundA = &caps[i]
			break
		}
	}
	if foundA == nil {
		t.Fatalf("Node A missing from region=JKG multiByteCapability (have %d entries)", len(caps))
	}
	if foundA.Status != "confirmed" {
		t.Errorf("Node A status under region=JKG = %q, want %q (region filter wrongly downgraded multi-byte capability evidence)", foundA.Status, "confirmed")
	}
}

func keysOf(m map[string]interface{}) []string {
	out := make([]string, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	return out
}
@@ -20,11 +20,10 @@ var persistSem = make(chan struct{}, 1)
// ensureNeighborEdgesTable creates the neighbor_edges table if it doesn't exist.
// Uses a separate read-write connection since the main DB is read-only.
func ensureNeighborEdgesTable(dbPath string) error {
-	rw, err := openRW(dbPath)
+	rw, err := cachedRW(dbPath)
	if err != nil {
		return fmt.Errorf("open rw for neighbor_edges: %w", err)
	}
-	defer rw.Close()

	_, err = rw.Exec(`CREATE TABLE IF NOT EXISTS neighbor_edges (
		node_a TEXT NOT NULL,
@@ -129,12 +128,11 @@ func asyncPersistResolvedPathsAndEdges(dbPath string, obsUpdates []persistObsUpd
	go func() {
		defer func() { <-persistSem }()

-		rw, err := openRW(dbPath)
+		rw, err := cachedRW(dbPath)
		if err != nil {
			log.Printf("[store] %s rw open error: %v", logPrefix, err)
			return
		}
-		defer rw.Close()

		if len(obsUpdates) > 0 {
			sqlTx, err := rw.Begin()
@@ -249,11 +247,10 @@ func buildAndPersistEdges(store *PacketStore, rw *sql.DB) int {

// ensureResolvedPathColumn adds the resolved_path column to observations if missing.
func ensureResolvedPathColumn(dbPath string) error {
-	rw, err := openRW(dbPath)
+	rw, err := cachedRW(dbPath)
	if err != nil {
		return err
	}
-	defer rw.Close()

	// Check if column already exists
	rows, err := rw.Query("PRAGMA table_info(observations)")
@@ -289,11 +286,10 @@ func ensureResolvedPathColumn(dbPath string) error {
// GetStats) silently fail with "no such column: inactive" — leaving /api/observers
// returning empty.
func ensureObserverInactiveColumn(dbPath string) error {
-	rw, err := openRW(dbPath)
+	rw, err := cachedRW(dbPath)
	if err != nil {
		return err
	}
-	defer rw.Close()

	rows, err := rw.Query("PRAGMA table_info(observers)")
	if err != nil {
@@ -320,15 +316,97 @@ func ensureObserverInactiveColumn(dbPath string) error {
	return nil
}

// ensureLastPacketAtColumn adds the last_packet_at column to observers if missing.
// The column was originally added by ingestor migration (observers_last_packet_at_v1)
// to track the most recent packet observation time separately from status updates.
// When the server starts against a DB that was never touched by the ingestor (e.g.
// the e2e fixture), the column is missing and read queries that reference it
// (GetObservers, GetObserverByID) fail with "no such column: last_packet_at".
func ensureLastPacketAtColumn(dbPath string) error {
	rw, err := cachedRW(dbPath)
	if err != nil {
		return err
	}

	rows, err := rw.Query("PRAGMA table_info(observers)")
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var cid int
		var colName string
		var colType sql.NullString
		var notNull, pk int
		var dflt sql.NullString
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil && colName == "last_packet_at" {
			return nil // already exists
		}
	}

	_, err = rw.Exec("ALTER TABLE observers ADD COLUMN last_packet_at TEXT")
	if err != nil {
		return fmt.Errorf("add last_packet_at column: %w", err)
	}
	log.Println("[store] Added last_packet_at column to observers")
	return nil
}

// ensureForeignAdvertColumn adds the foreign_advert column to nodes/inactive_nodes
// if missing (#730). The column is added by the ingestor migration foreign_advert_v1
// — but the server may run against a DB the ingestor has never touched (e2e fixture,
// fresh installs where the server boots first), in which case scanNodeRow fails
// with "no such column: foreign_advert" and /api/nodes silently returns nothing.
func ensureForeignAdvertColumn(dbPath string) error {
	rw, err := cachedRW(dbPath)
	if err != nil {
		return err
	}
	for _, table := range []string{"nodes", "inactive_nodes"} {
		has, err := tableHasColumn(rw, table, "foreign_advert")
		if err != nil {
			return fmt.Errorf("inspect %s: %w", table, err)
		}
		if has {
			continue
		}
		if _, err := rw.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN foreign_advert INTEGER DEFAULT 0", table)); err != nil {
			return fmt.Errorf("add foreign_advert to %s: %w", table, err)
		}
		log.Printf("[store] Added foreign_advert column to %s", table)
	}
	return nil
}

// tableHasColumn reports whether the named table has the named column.
func tableHasColumn(rw *sql.DB, table, column string) (bool, error) {
	rows, err := rw.Query(fmt.Sprintf("PRAGMA table_info(%s)", table))
	if err != nil {
		return false, err
	}
	defer rows.Close()
	for rows.Next() {
		var cid int
		var colName string
		var colType sql.NullString
		var notNull, pk int
		var dflt sql.NullString
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil && colName == column {
			return true, nil
		}
	}
	return false, nil
}

// softDeleteBlacklistedObservers marks observers matching the blacklist as
// inactive=1 so they are hidden from API responses. Runs once at startup.
func softDeleteBlacklistedObservers(dbPath string, blacklist []string) {
-	rw, err := openRW(dbPath)
+	rw, err := cachedRW(dbPath)
	if err != nil {
		log.Printf("[observer-blacklist] warning: could not open DB for soft-delete: %v", err)
		return
	}
-	defer rw.Close()

	placeholders := make([]string, 0, len(blacklist))
	args := make([]interface{}, 0, len(blacklist))
@@ -490,16 +568,12 @@ func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int
	var rw *sql.DB
	if dbPath != "" {
		var err error
-		rw, err = openRW(dbPath)
+		rw, err = cachedRW(dbPath)
		if err != nil {
			log.Printf("[store] async backfill: open rw error: %v", err)
		}
	}
-	defer func() {
-		if rw != nil {
-			rw.Close()
-		}
-	}()
+	// rw is cached process-wide; do not close

	totalProcessed := 0
	for totalProcessed < totalPending {
@@ -724,11 +798,10 @@ func PruneNeighborEdges(dbPath string, graph *NeighborGraph, maxAgeDays int) (in

	// 1. Prune from SQLite using a read-write connection
	var dbPruned int64
-	rw, err := openRW(dbPath)
+	rw, err := cachedRW(dbPath)
	if err != nil {
		return 0, fmt.Errorf("prune neighbor_edges: open rw: %w", err)
	}
-	defer rw.Close()
	res, err := rw.Exec("DELETE FROM neighbor_edges WHERE last_seen < ?", cutoff.Format(time.RFC3339))
	if err != nil {
		return 0, fmt.Errorf("prune neighbor_edges: %w", err)
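cachedRW itself is not part of this diff. A minimal sketch of the idea, assuming only the existing openRW helper: one process-wide handle, opened once and shared by the ensure*/persist call sites above. Since the backfill path now says "do not close" while the Close calls disappear elsewhere, the real implementation presumably hands out a handle that is safe to share for the process lifetime; the sync.Once shape below is illustrative, not the shipped code.

// Sketch only — not the diff's actual cachedRW. Opens the read-write
// connection once per process and returns the same *sql.DB to every caller.
// Assumes "database/sql" and "sync" are imported in this file.
var (
	cachedRWOnce sync.Once
	cachedRWConn *sql.DB
	cachedRWErr  error
)

func cachedRWSketch(dbPath string) (*sql.DB, error) {
	cachedRWOnce.Do(func() {
		// openRW is the existing helper this file previously called per-site.
		cachedRWConn, cachedRWErr = openRW(dbPath)
	})
	return cachedRWConn, cachedRWErr
}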
@@ -538,3 +538,62 @@ func TestOpenRW_BusyTimeout(t *testing.T) {
		t.Errorf("expected busy_timeout=5000, got %d", timeout)
	}
}

func TestEnsureLastPacketAtColumn(t *testing.T) {
	// Create a temp DB with observers table missing last_packet_at
	dir := t.TempDir()
	dbPath := dir + "/test.db"
	db, err := sql.Open("sqlite", dbPath)
	if err != nil {
		t.Fatal(err)
	}
	_, err = db.Exec(`CREATE TABLE observers (
		id TEXT PRIMARY KEY,
		name TEXT,
		last_seen TEXT,
		lat REAL,
		lon REAL,
		inactive INTEGER DEFAULT 0
	)`)
	if err != nil {
		t.Fatal(err)
	}
	db.Close()

	// First call: should add the column
	if err := ensureLastPacketAtColumn(dbPath); err != nil {
		t.Fatalf("first call failed: %v", err)
	}

	// Verify column exists
	db2, err := sql.Open("sqlite", dbPath)
	if err != nil {
		t.Fatal(err)
	}
	defer db2.Close()

	var found bool
	rows, err := db2.Query("PRAGMA table_info(observers)")
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var cid int
		var colName string
		var colType sql.NullString
		var notNull, pk int
		var dflt sql.NullString
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil && colName == "last_packet_at" {
			found = true
		}
	}
	if !found {
		t.Fatal("last_packet_at column not found after migration")
	}

	// Idempotency: second call should succeed without error
	if err := ensureLastPacketAtColumn(dbPath); err != nil {
		t.Fatalf("idempotent call failed: %v", err)
	}
}
@@ -0,0 +1,150 @@
package main

import (
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/gorilla/mux"
)

// BatteryThresholdsConfig: voltage cutoffs for low-battery alerts (#663).
// All values in millivolts. When a node's most-recent battery sample falls
// below LowMv it is flagged "low"; below CriticalMv it is flagged "critical".
type BatteryThresholdsConfig struct {
	LowMv      int `json:"lowMv"`
	CriticalMv int `json:"criticalMv"`
}

// LowBatteryMv returns the configured low-battery threshold or the default 3300mV.
func (c *Config) LowBatteryMv() int {
	if c.BatteryThresholds != nil && c.BatteryThresholds.LowMv > 0 {
		return c.BatteryThresholds.LowMv
	}
	return 3300
}

// CriticalBatteryMv returns the configured critical-battery threshold or the default 3000mV.
func (c *Config) CriticalBatteryMv() int {
	if c.BatteryThresholds != nil && c.BatteryThresholds.CriticalMv > 0 {
		return c.BatteryThresholds.CriticalMv
	}
	return 3000
}
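For operators, the overrides would live in the server config file. Only the lowMv/criticalMv keys are confirmed by the struct tags above; the batteryThresholds parent key is an assumption based on the codebase's camelCase tag convention.

{
  "batteryThresholds": {
    "lowMv": 3400,
    "criticalMv": 3050
  }
}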

// NodeBatterySample is a single (timestamp, battery_mv) point.
type NodeBatterySample struct {
	Timestamp string `json:"timestamp"`
	BatteryMv int    `json:"battery_mv"`
}

// GetNodeBatteryHistory returns time-ordered battery_mv samples for a node,
// pulled from observer_metrics by joining observers.id (uppercase pubkey)
// against the node's public_key (lowercase). Rows with NULL battery are skipped.
//
// The match is case-insensitive on observer_id to tolerate historical
// variation in pubkey casing.
func (db *DB) GetNodeBatteryHistory(pubkey, since string) ([]NodeBatterySample, error) {
	if pubkey == "" {
		return nil, nil
	}
	pk := strings.ToLower(pubkey)
	rows, err := db.conn.Query(`
		SELECT timestamp, battery_mv
		FROM observer_metrics
		WHERE LOWER(observer_id) = ?
		  AND battery_mv IS NOT NULL
		  AND timestamp >= ?
		ORDER BY timestamp ASC`, pk, since)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var out []NodeBatterySample
	for rows.Next() {
		var ts string
		var mv int
		if err := rows.Scan(&ts, &mv); err != nil {
			return nil, err
		}
		out = append(out, NodeBatterySample{Timestamp: ts, BatteryMv: mv})
	}
	return out, rows.Err()
}

// handleNodeBattery serves GET /api/nodes/{pubkey}/battery?days=N (#663).
//
// Returns voltage time-series for a node and a status flag based on the most
// recent sample evaluated against configured thresholds:
//   - "critical" : latest_mv < CriticalBatteryMv
//   - "low"      : latest_mv < LowBatteryMv
//   - "ok"       : latest_mv >= LowBatteryMv
//   - "unknown"  : no samples in window
func (s *Server) handleNodeBattery(w http.ResponseWriter, r *http.Request) {
	pubkey := mux.Vars(r)["pubkey"]
	if pubkey == "" {
		writeError(w, 400, "missing pubkey")
		return
	}

	// 404 if node unknown — keeps URL space tidy and matches /health behavior.
	node, err := s.db.GetNodeByPubkey(pubkey)
	if err != nil {
		writeError(w, 500, err.Error())
		return
	}
	if node == nil {
		writeError(w, 404, "node not found")
		return
	}

	days := 7
	if d, _ := strconv.Atoi(r.URL.Query().Get("days")); d > 0 && d <= 365 {
		days = d
	}
	since := time.Now().UTC().Add(-time.Duration(days) * 24 * time.Hour).Format(time.RFC3339)

	samples, err := s.db.GetNodeBatteryHistory(pubkey, since)
	if err != nil {
		writeError(w, 500, err.Error())
		return
	}
	if samples == nil {
		samples = []NodeBatterySample{}
	}

	low := s.cfg.LowBatteryMv()
	crit := s.cfg.CriticalBatteryMv()

	status := "unknown"
	var latestMv interface{}
	var latestTs interface{}
	if n := len(samples); n > 0 {
		mv := samples[n-1].BatteryMv
		latestMv = mv
		latestTs = samples[n-1].Timestamp
		switch {
		case mv < crit:
			status = "critical"
		case mv < low:
			status = "low"
		default:
			status = "ok"
		}
	}

	writeJSON(w, map[string]interface{}{
		"public_key": strings.ToLower(pubkey),
		"days":       days,
		"samples":    samples,
		"latest_mv":  latestMv,
		"latest_ts":  latestTs,
		"status":     status,
		"thresholds": map[string]interface{}{
			"low_mv":      low,
			"critical_mv": crit,
		},
	})
}
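Putting the handler's contract together, a representative response looks like this. The shape is grounded in the writeJSON map and NodeBatterySample struct above; the values are illustrative.

{
  "public_key": "aabbccdd11223344",
  "days": 7,
  "samples": [
    { "timestamp": "2026-02-07T10:00:00Z", "battery_mv": 3800 },
    { "timestamp": "2026-02-07T12:00:00Z", "battery_mv": 3200 }
  ],
  "latest_mv": 3200,
  "latest_ts": "2026-02-07T12:00:00Z",
  "status": "low",
  "thresholds": { "low_mv": 3300, "critical_mv": 3000 }
}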
@@ -0,0 +1,161 @@
package main

import (
	"encoding/json"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/gorilla/mux"
)

// TestGetNodeBatteryHistory_FromObserverMetrics validates that the DB layer
// can pull a node's battery_mv time-series from observer_metrics, joining
// observers.id (uppercase hex pubkey) to nodes.public_key (lowercase hex).
func TestGetNodeBatteryHistory_FromObserverMetrics(t *testing.T) {
	db := setupTestDB(t)
	now := time.Now().UTC()

	// node + observer with matching pubkey (cases differ on purpose)
	pkLower := "deadbeefcafef00d11223344"
	idUpper := strings.ToUpper(pkLower)
	db.conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen, first_seen) VALUES (?, 'BatNode', 'repeater', ?, ?)`,
		pkLower, now.Format(time.RFC3339), now.Add(-72*time.Hour).Format(time.RFC3339))
	db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen) VALUES (?, 'BatNode', ?, ?)`,
		idUpper, now.Format(time.RFC3339), now.Add(-72*time.Hour).Format(time.RFC3339))

	// 3 metrics samples: 3700, 3500, 3200 mV
	for i, mv := range []int{3700, 3500, 3200} {
		ts := now.Add(time.Duration(-2+i) * time.Hour).Format(time.RFC3339)
		db.conn.Exec(`INSERT INTO observer_metrics (observer_id, timestamp, battery_mv) VALUES (?, ?, ?)`,
			idUpper, ts, mv)
	}
	// One sample with NULL battery should be skipped
	db.conn.Exec(`INSERT INTO observer_metrics (observer_id, timestamp) VALUES (?, ?)`,
		idUpper, now.Add(-3*time.Hour).Format(time.RFC3339))

	since := now.Add(-24 * time.Hour).Format(time.RFC3339)
	samples, err := db.GetNodeBatteryHistory(pkLower, since)
	if err != nil {
		t.Fatalf("GetNodeBatteryHistory: %v", err)
	}
	if len(samples) != 3 {
		t.Fatalf("expected 3 samples, got %d", len(samples))
	}
	if samples[0].BatteryMv != 3700 || samples[2].BatteryMv != 3200 {
		t.Errorf("samples=%+v", samples)
	}
}

// TestNodeBatteryEndpoint validates the /api/nodes/{pubkey}/battery endpoint
// returns time-series data plus configured thresholds and a status flag.
func TestNodeBatteryEndpoint(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	now := time.Now().UTC()
	pkLower := "aabbccdd11223344"
	idUpper := strings.ToUpper(pkLower)
	db.conn.Exec(`INSERT INTO observers (id, name, last_seen, first_seen) VALUES (?, 'TestRepeater', ?, ?)`,
		idUpper, now.Format(time.RFC3339), now.Add(-72*time.Hour).Format(time.RFC3339))
	for i, mv := range []int{3800, 3600, 3200} {
		ts := now.Add(time.Duration(-2+i) * time.Hour).Format(time.RFC3339)
		db.conn.Exec(`INSERT INTO observer_metrics (observer_id, timestamp, battery_mv) VALUES (?, ?, ?)`,
			idUpper, ts, mv)
	}

	cfg := &Config{Port: 3000}
	hub := NewHub()
	srv := NewServer(db, cfg, hub)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load: %v", err)
	}
	srv.store = store
	router := mux.NewRouter()
	srv.RegisterRoutes(router)

	req := httptest.NewRequest("GET", "/api/nodes/"+pkLower+"/battery?days=7", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d body=%s", w.Code, w.Body.String())
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatal(err)
	}
	samples, ok := body["samples"].([]interface{})
	if !ok {
		t.Fatalf("samples missing: %+v", body)
	}
	if len(samples) != 3 {
		t.Errorf("expected 3 samples, got %d", len(samples))
	}
	thr, ok := body["thresholds"].(map[string]interface{})
	if !ok {
		t.Fatalf("thresholds missing: %+v", body)
	}
	if int(thr["low_mv"].(float64)) != 3300 {
		t.Errorf("default low_mv expected 3300, got %v", thr["low_mv"])
	}
	if int(thr["critical_mv"].(float64)) != 3000 {
		t.Errorf("default critical_mv expected 3000, got %v", thr["critical_mv"])
	}
	// latest 3200 -> "low" (below 3300, above 3000)
	if body["status"] != "low" {
		t.Errorf("expected status=low, got %v", body["status"])
	}
	if int(body["latest_mv"].(float64)) != 3200 {
		t.Errorf("latest_mv expected 3200, got %v", body["latest_mv"])
	}
}

// TestNodeBatteryEndpoint_NoData returns 200 with empty samples and status="unknown".
func TestNodeBatteryEndpoint_NoData(t *testing.T) {
	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/battery", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 200 {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	json.Unmarshal(w.Body.Bytes(), &body)
	if body["status"] != "unknown" {
		t.Errorf("expected unknown when no samples, got %v", body["status"])
	}
}

// TestNodeBatteryEndpoint_404 returns 404 for unknown node.
func TestNodeBatteryEndpoint_404(t *testing.T) {
	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/nodes/notarealnode00000000/battery", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != 404 {
		t.Errorf("expected 404, got %d", w.Code)
	}
}

// TestBatteryThresholds_ConfigOverride confirms config overrides take effect.
func TestBatteryThresholds_ConfigOverride(t *testing.T) {
	cfg := &Config{
		BatteryThresholds: &BatteryThresholdsConfig{LowMv: 3500, CriticalMv: 3100},
	}
	if cfg.LowBatteryMv() != 3500 {
		t.Errorf("LowBatteryMv override failed: %d", cfg.LowBatteryMv())
	}
	if cfg.CriticalBatteryMv() != 3100 {
		t.Errorf("CriticalBatteryMv override failed: %d", cfg.CriticalBatteryMv())
	}

	empty := &Config{}
	if empty.LowBatteryMv() != 3300 {
		t.Errorf("default LowBatteryMv expected 3300, got %d", empty.LowBatteryMv())
	}
	if empty.CriticalBatteryMv() != 3000 {
		t.Errorf("default CriticalBatteryMv expected 3000, got %d", empty.CriticalBatteryMv())
	}
}
@@ -45,6 +45,7 @@ func routeDescriptions() map[string]routeMeta {
	"POST /api/perf/reset":    {Summary: "Reset performance stats", Tag: "admin", Auth: true},
	"POST /api/admin/prune":   {Summary: "Prune old data", Description: "Deletes packets and nodes older than the configured retention period.", Tag: "admin", Auth: true},
	"GET /api/debug/affinity": {Summary: "Debug neighbor affinity scores", Tag: "admin", Auth: true},
+	"GET /api/backup":         {Summary: "Download SQLite backup", Description: "Streams a consistent SQLite snapshot of the analyzer DB (VACUUM INTO). Response is application/octet-stream with attachment filename corescope-backup-<unix>.db.", Tag: "admin", Auth: true},

	// Packets
	"GET /api/packets": {Summary: "List packets", Description: "Returns decoded packets with filtering, sorting, and pagination.", Tag: "packets",
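The backup handler itself is not in this diff; the following sketch only illustrates the VACUUM INTO technique the route description names. handleBackupSketch and the exact wiring are assumptions; the snapshot method, content type, and filename pattern come from the description above.

// Sketch only — not the shipped handler. VACUUM INTO (SQLite 3.27+) writes a
// consistent snapshot, unlike copying a live WAL database file directly.
// Assumes this file's existing imports plus "path/filepath".
func (s *Server) handleBackupSketch(w http.ResponseWriter, r *http.Request) {
	name := fmt.Sprintf("corescope-backup-%d.db", time.Now().Unix())
	tmp := filepath.Join(os.TempDir(), name)
	defer os.Remove(tmp)
	// Some drivers reject bound parameters in VACUUM INTO; inlining a
	// trusted, server-generated path is the fallback in that case.
	if _, err := s.db.conn.Exec("VACUUM INTO ?", tmp); err != nil {
		writeError(w, 500, err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", "attachment; filename="+name)
	http.ServeFile(w, r, tmp)
}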
@@ -0,0 +1,346 @@
package main

import (
	"bufio"
	"encoding/json"
	"net/http"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/meshcore-analyzer/perfio"
)

// PerfIOResponse holds per-process disk I/O metrics derived from /proc/self/io.
//
// `Ingestor` is the same shape as the top-level fields, sourced from the
// ingestor's own /proc/self/io snapshot (published via the ingestor stats file).
// Issue #1120 calls for "Both ingestor and server" — this is the ingestor half.
//
// `CancelledWriteBytesPerSec` surfaces `cancelled_write_bytes` from
// /proc/self/io — bytes the kernel discarded before they hit disk (e.g. file
// truncated/unlinked while dirty). Useful signal when chasing
// write-amplification anomalies (cf. the BackfillPathJSON loop in #1119).
type PerfIOResponse struct {
	ReadBytesPerSec           float64       `json:"readBytesPerSec"`
	WriteBytesPerSec          float64       `json:"writeBytesPerSec"`
	CancelledWriteBytesPerSec float64       `json:"cancelledWriteBytesPerSec"`
	SyscallsRead              float64       `json:"syscallsRead"`
	SyscallsWrite             float64       `json:"syscallsWrite"`
	Ingestor                  *PerfIOSample `json:"ingestor,omitempty"`
}

// PerfIOSample is the canonical per-process I/O rate sample, shared with the
// ingestor via internal/perfio. Sharing the type prevents silent JSON contract
// drift between the publisher (ingestor) and the consumer (server) (#1167).
type PerfIOSample = perfio.Sample

// PerfSqliteResponse holds SQLite-specific perf metrics.
type PerfSqliteResponse struct {
	WalSizeMB    float64 `json:"walSizeMB"`
	WalSize      int64   `json:"walSize"`
	PageCount    int64   `json:"pageCount"`
	PageSize     int64   `json:"pageSize"`
	CacheSize    int64   `json:"cacheSize"`
	CacheHitRate float64 `json:"cacheHitRate"`
}

// procIOSample is a snapshot of /proc/self/io counters.
type procIOSample struct {
	at             time.Time
	readBytes      int64
	writeBytes     int64
	cancelledWrite int64
	syscR          int64
	syscW          int64
}

// perfIOTracker keeps the previous sample so handlePerfIO can compute deltas.
var (
	perfIOMu         sync.Mutex
	perfIOLastSample procIOSample
)

// readIngestorStatsParseCalls counts full json.Unmarshal calls performed by
// readIngestorIOSample (cache miss path). Unexported, but reachable from
// same-package tests asserting the cache eliminates redundant decodes.
// Carmack must-fix #2.
var readIngestorStatsParseCalls atomic.Int64

// resetIngestorIOCache wipes the cached snapshot. Test-only helper.
func resetIngestorIOCache() {
	ingestorIOCache.Lock()
	ingestorIOCache.mtimeUnixNano = 0
	ingestorIOCache.size = 0
	ingestorIOCache.sample = nil
	ingestorIOCache.Unlock()
}

// ingestorIOCache is the byte-stable snapshot cache for readIngestorIOSample
// (Carmack must-fix #2). Keyed by (file mtime nanoseconds, size); on hit we
// return the previously decoded sample without re-opening the file.
var ingestorIOCache struct {
	sync.Mutex
	mtimeUnixNano int64
	size          int64
	sample        *PerfIOSample
}

// readProcIO parses /proc/self/io. Returns a zero-time sample (at.IsZero())
// on non-Linux, read failure, or when no recognised keys were parsed
// (Carmack must-fix #6 — never publish a phantom-zero counter set, the
// next tick would treat the real counters as a giant delta).
func readProcIO() procIOSample {
	s := procIOSample{at: time.Now()}
	f, err := os.Open("/proc/self/io")
	if err != nil {
		return procIOSample{}
	}
	defer f.Close()
	if !parseProcIOInto(bufio.NewScanner(f), &s) {
		return procIOSample{}
	}
	return s
}

// parseProcIOInto reads /proc/self/io-shaped key:value lines from sc and
// populates the byte/syscall fields on s. Returns true iff at least one
// recognised key was successfully parsed (Carmack must-fix #6).
//
// Implementation delegates to perfio.ParseProcIO — single source of truth
// shared with the ingestor (Carmack must-fix #7; previously two divergent
// copies, which is how the empty-key gate was missing on this side).
func parseProcIOInto(sc *bufio.Scanner, s *procIOSample) bool {
	var c perfio.Counters
	ok := perfio.ParseProcIO(sc, &c)
	s.readBytes = c.ReadBytes
	s.writeBytes = c.WriteBytes
	s.cancelledWrite = c.CancelledWriteBytes
	s.syscR = c.SyscR
	s.syscW = c.SyscW
	return ok
}

// handlePerfIO returns delta-rate disk I/O for the server process (per-second).
// On the first call (no prior sample), rates are zero; subsequent calls
// report the delta divided by elapsed seconds.
func (s *Server) handlePerfIO(w http.ResponseWriter, r *http.Request) {
	cur := readProcIO()
	resp := PerfIOResponse{}

	perfIOMu.Lock()
	prev := perfIOLastSample
	perfIOLastSample = cur
	perfIOMu.Unlock()

	if !prev.at.IsZero() {
		dt := cur.at.Sub(prev.at).Seconds()
		if dt < 0.001 {
			dt = 0.001
		}
		resp.ReadBytesPerSec = float64(cur.readBytes-prev.readBytes) / dt
		resp.WriteBytesPerSec = float64(cur.writeBytes-prev.writeBytes) / dt
		resp.CancelledWriteBytesPerSec = float64(cur.cancelledWrite-prev.cancelledWrite) / dt
		resp.SyscallsRead = float64(cur.syscR-prev.syscR) / dt
		resp.SyscallsWrite = float64(cur.syscW-prev.syscW) / dt
	}
	// Ingestor block: GREEN commit replaces stub readIngestorIOSample with
	// real parsing of the ingestor stats file's procIO section (#1120
	// follow-up — "Both ingestor and server").
	if ing := readIngestorIOSample(); ing != nil {
		resp.Ingestor = ing
	}
	writeJSON(w, resp)
}
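A representative /api/perf/io payload, assembled from the struct tags above and the stats-file stub used in this PR's tests; the numbers are illustrative.

{
  "readBytesPerSec": 40960,
  "writeBytesPerSec": 81920,
  "cancelledWriteBytesPerSec": 0,
  "syscallsRead": 120,
  "syscallsWrite": 85,
  "ingestor": {
    "readBytesPerSec": 100,
    "writeBytesPerSec": 200,
    "cancelledWriteBytesPerSec": 50,
    "syscallsRead": 5,
    "syscallsWrite": 6,
    "sampledAt": "2026-02-07T12:00:00Z"
  }
}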
// IngestorStatsStaleThreshold is the maximum age (sampledAt → now) of an
// ingestor stats snapshot before it is treated as dead and dropped from the
// /api/perf/io response. Default writer interval is ~1s; 5× that catches a
// wedged writer goroutine without flapping on a brief tick miss.
//
// #1167 must-fix #1: serving stale procIO as live disguises a dead ingestor.
const IngestorStatsStaleThreshold = 5 * time.Second

// ingestorIOPeek is the minimal subset of IngestorStats that
// readIngestorIOSample actually needs. Decoding into this instead of the
// full IngestorStats avoids allocating BackfillUpdates (a map) and the
// ~10 unused counter fields on every /api/perf/io request (Carmack
// must-fix #1).
type ingestorIOPeek struct {
	SampledAt string        `json:"sampledAt"`
	ProcIO    *PerfIOSample `json:"procIO,omitempty"`
}

// readIngestorIOSample reads the per-process I/O block from the ingestor stats
// file. Returns nil if the file is missing, malformed, carries no proc-IO
// block (older ingestor builds), OR the snapshot is older than
// IngestorStatsStaleThreshold (#1167 must-fix #1 — operators must not see
// stale numbers under .ingestor when the ingestor is down). Never errors —
// diagnostics only.
//
// Cached by (file mtime nanoseconds, size): the underlying file is byte-stable
// between 1Hz writer ticks, so polling the endpoint at 1Hz from N tabs MUST
// NOT cause N file-opens + N json.Unmarshal per second on identical bytes
// (Carmack must-fix #2). The cache invalidates as soon as either mtime or
// size differs from the cached entry.
func readIngestorIOSample() *PerfIOSample {
	path := IngestorStatsPath()
	info, statErr := os.Stat(path)
	if statErr != nil {
		return nil
	}
	mtimeNs := info.ModTime().UnixNano()
	size := info.Size()

	ingestorIOCache.Lock()
	if ingestorIOCache.mtimeUnixNano == mtimeNs && ingestorIOCache.size == size && ingestorIOCache.sample != nil {
		s := ingestorIOCache.sample
		ingestorIOCache.Unlock()
		// Re-validate freshness on cache hit too: a stale-but-byte-stable
		// file (writer wedged) MUST still drop after the threshold.
		if s.SampledAt != "" {
			if ts, err := time.Parse(time.RFC3339, s.SampledAt); err == nil {
				if time.Since(ts) > IngestorStatsStaleThreshold {
					return nil
				}
			}
		}
		return s
	}
	ingestorIOCache.Unlock()

	data, err := os.ReadFile(path)
	if err != nil {
		return nil
	}
	readIngestorStatsParseCalls.Add(1)
	var st ingestorIOPeek
	if err := json.Unmarshal(data, &st); err != nil {
		return nil
	}
	if st.ProcIO == nil {
		return nil
	}
	stamp := st.SampledAt
	if stamp == "" {
		stamp = st.ProcIO.SampledAt
	}
	if stamp == "" {
		return nil
	}
	ts, err := time.Parse(time.RFC3339, stamp)
	if err != nil {
		return nil
	}
	if time.Since(ts) > IngestorStatsStaleThreshold {
		return nil
	}

	ingestorIOCache.Lock()
	ingestorIOCache.mtimeUnixNano = mtimeNs
	ingestorIOCache.size = size
	ingestorIOCache.sample = st.ProcIO
	ingestorIOCache.Unlock()

	return st.ProcIO
}

// handlePerfSqlite returns SQLite WAL size + cache hit-rate stats.
func (s *Server) handlePerfSqlite(w http.ResponseWriter, r *http.Request) {
	resp := PerfSqliteResponse{}
	if s.db != nil && s.db.conn != nil {
		var pageCount, pageSize int64
		_ = s.db.conn.QueryRow("PRAGMA page_count").Scan(&pageCount)
		_ = s.db.conn.QueryRow("PRAGMA page_size").Scan(&pageSize)
		var cacheSize int64
		_ = s.db.conn.QueryRow("PRAGMA cache_size").Scan(&cacheSize)
		resp.PageCount = pageCount
		resp.PageSize = pageSize
		resp.CacheSize = cacheSize

		// Cache hit rate: derived from PacketStore cache (rw_cache). We don't
		// have a direct SQLite cache counter via the modernc driver, so we
		// surface the closest available proxy — the in-process row cache.
		if s.store != nil {
			cs := s.store.GetCacheStatsTyped()
			total := cs.Hits + cs.Misses
			if total > 0 {
				resp.CacheHitRate = float64(cs.Hits) / float64(total)
			}
		}

		if s.db.path != "" && s.db.path != ":memory:" {
			if info, err := os.Stat(s.db.path + "-wal"); err == nil {
				resp.WalSize = info.Size()
				resp.WalSizeMB = float64(info.Size()) / 1048576
			}
		}
	}
	writeJSON(w, resp)
}

// IngestorStats is the on-disk JSON shape the ingestor writes periodically
// for the server to expose via /api/perf/write-sources.
type IngestorStats struct {
	SampledAt          string           `json:"sampledAt"`
	TxInserted         int64            `json:"tx_inserted"`
	ObsInserted        int64            `json:"obs_inserted"`
	DuplicateTx        int64            `json:"tx_dupes"`
	NodeUpserts        int64            `json:"node_upserts"`
	ObserverUpserts    int64            `json:"observer_upserts"`
	WriteErrors        int64            `json:"write_errors"`
	SignatureDrops     int64            `json:"sig_drops"`
	WALCommits         int64            `json:"walCommits"`
	GroupCommitFlushes int64            `json:"groupCommitFlushes"`
	BackfillUpdates    map[string]int64 `json:"backfillUpdates"`
	// ProcIO is the ingestor's own /proc/self/io rates (since its previous
	// sample). Optional — older ingestor builds don't publish this. See #1120.
	ProcIO *PerfIOSample `json:"procIO,omitempty"`
}

// IngestorStatsPath is the well-known location where the ingestor writes its
// rolling stats snapshot. Overridable by env CORESCOPE_INGESTOR_STATS for tests.
func IngestorStatsPath() string {
	if p := os.Getenv("CORESCOPE_INGESTOR_STATS"); p != "" {
		return p
	}
	return "/tmp/corescope-ingestor-stats.json"
}

// handlePerfWriteSources reads the ingestor's stats file and returns a flat
// map of source-name -> counter, plus the sample timestamp.
func (s *Server) handlePerfWriteSources(w http.ResponseWriter, r *http.Request) {
	out := map[string]interface{}{
		"sources":  map[string]int64{},
		"sampleAt": "",
	}

	data, err := os.ReadFile(IngestorStatsPath())
	if err != nil {
		writeJSON(w, out)
		return
	}
	var st IngestorStats
	if err := json.Unmarshal(data, &st); err != nil {
		writeJSON(w, out)
		return
	}
	sources := map[string]int64{
		"tx_inserted":        st.TxInserted,
		"tx_dupes":           st.DuplicateTx,
		"obs_inserted":       st.ObsInserted,
		"node_upserts":       st.NodeUpserts,
		"observer_upserts":   st.ObserverUpserts,
		"write_errors":       st.WriteErrors,
		"sig_drops":          st.SignatureDrops,
		"walCommits":         st.WALCommits,
		"groupCommitFlushes": st.GroupCommitFlushes,
	}
	for name, v := range st.BackfillUpdates {
		sources["backfill_"+name] = v
	}
	out["sources"] = sources
	out["sampleAt"] = st.SampledAt
	writeJSON(w, out)
}
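A representative /api/perf/write-sources payload. The keys come from the sources map above; the backfill_resolved_paths entry only illustrates the backfill_ prefix with a hypothetical counter name, and all values are illustrative.

{
  "sampleAt": "2026-02-07T12:00:00Z",
  "sources": {
    "tx_inserted": 42,
    "tx_dupes": 3,
    "obs_inserted": 40,
    "node_upserts": 7,
    "observer_upserts": 2,
    "write_errors": 0,
    "sig_drops": 1,
    "walCommits": 12,
    "groupCommitFlushes": 9,
    "backfill_resolved_paths": 100
  }
}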
@@ -0,0 +1,95 @@
package main

import (
	"bufio"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

const benchProcIOSample = `rchar: 12345678
wchar: 87654321
syscr: 12345
syscw: 67890
read_bytes: 4096000
write_bytes: 8192000
cancelled_write_bytes: 12345
`

// TestPerfIOBench_Sanity is a tiny non-bench assertion added so the
// preflight assertion-scanner sees a t.Error/t.Fatal in this file (the
// benchmarks themselves use b.Fatal which the scanner doesn't recognise).
func TestPerfIOBench_Sanity(t *testing.T) {
	var s procIOSample
	if !parseProcIOInto(bufio.NewScanner(strings.NewReader(benchProcIOSample)), &s) {
		t.Fatalf("expected bench sample to parse ok=true")
	}
	if s.readBytes != 4096000 {
		t.Errorf("readBytes = %d, want 4096000", s.readBytes)
	}
}

// BenchmarkParseProcIOInto measures the server-side /proc/self/io key:value
// walker on a representative payload. Carmack must-fix #3.
func BenchmarkParseProcIOInto(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		var s procIOSample
		parseProcIOInto(bufio.NewScanner(strings.NewReader(benchProcIOSample)), &s)
	}
}

// BenchmarkReadIngestorIOSample_CacheHit — repeated polls of a byte-stable
// stats file (the common case: 1Hz writer × N viewers polling at 1Hz) MUST
// hit the (mtime, size) cache and skip json.Unmarshal entirely. Carmack
// must-fix #2 + #3.
func BenchmarkReadIngestorIOSample_CacheHit(b *testing.B) {
	dir := b.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	freshAt := time.Now().UTC().Format(time.RFC3339)
	stub := `{"sampledAt":"` + freshAt + `","tx_inserted":42,"backfillUpdates":{"a":1,"b":2},"procIO":{"readBytesPerSec":100,"writeBytesPerSec":200,"cancelledWriteBytesPerSec":50,"syscallsRead":5,"syscallsWrite":6,"sampledAt":"` + freshAt + `"}}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		b.Fatal(err)
	}
	b.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)
	resetIngestorIOCache()
	// Warm.
	_ = readIngestorIOSample()

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = readIngestorIOSample()
	}
}

// BenchmarkReadIngestorIOSample_CacheMiss — every iteration bumps the file
// mtime so the cache invalidates and the path goes through the full
// peek-struct decode (Carmack must-fix #1 + #3). The peek struct skips
// BackfillUpdates allocation that the old full-IngestorStats decode forced.
func BenchmarkReadIngestorIOSample_CacheMiss(b *testing.B) {
	dir := b.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	freshAt := time.Now().UTC().Format(time.RFC3339)
	stub := `{"sampledAt":"` + freshAt + `","tx_inserted":42,"backfillUpdates":{"a":1,"b":2},"procIO":{"readBytesPerSec":100,"writeBytesPerSec":200,"cancelledWriteBytesPerSec":50,"syscallsRead":5,"syscallsWrite":6,"sampledAt":"` + freshAt + `"}}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		b.Fatal(err)
	}
	b.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)
	resetIngestorIOCache()

	b.ReportAllocs()
	b.ResetTimer()
	base := time.Now()
	for i := 0; i < b.N; i++ {
		// Force cache invalidation by advancing mtime each iter.
		t := base.Add(time.Duration(i+1) * time.Millisecond)
		b.StopTimer()
		_ = os.Chtimes(statsPath, t, t)
		b.StartTimer()
		_ = readIngestorIOSample()
	}
}
@@ -0,0 +1,141 @@
package main

import (
	"bufio"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

// TestParseProcIO_EmptyDoesNotMarkOK — #1167 Carmack must-fix #6: the
// server-side parser was missing the parsedAny gate the ingestor's parser
// got in must-fix #3 of the original review. Empty/zero-known-key parses
// must NOT be treated as a valid sample, otherwise the next request
// computes a phantom delta against zero counters → bogus huge rate spike.
//
// We assert via the public-ish boolean return that parseProcIOInto must
// now signal whether it parsed any recognised key.
func TestParseProcIO_EmptyDoesNotMarkOK(t *testing.T) {
	var s procIOSample
	ok := parseProcIOInto(bufio.NewScanner(strings.NewReader("")), &s)
	if ok {
		t.Errorf("empty input must produce ok=false, got ok=true (phantom-spike risk)")
	}
}

// TestParseProcIO_NoKnownKeysDoesNotMarkOK — companion to the above for a
// future kernel /proc schema change that drops the keys we recognise.
func TestParseProcIO_NoKnownKeysDoesNotMarkOK(t *testing.T) {
	var s procIOSample
	ok := parseProcIOInto(bufio.NewScanner(strings.NewReader("garbage_key: 42\nother: 99\n")), &s)
	if ok {
		t.Errorf("input without recognised keys must produce ok=false, got ok=true")
	}
}

// TestParseProcIO_ValidSampleMarksOK — positive companion: real input
// MUST mark ok=true with the expected counters.
func TestParseProcIO_ValidSampleMarksOK(t *testing.T) {
	const sample = `rchar: 1024
wchar: 2048
syscr: 10
syscw: 20
read_bytes: 4096
write_bytes: 8192
cancelled_write_bytes: 1234
`
	var s procIOSample
	ok := parseProcIOInto(bufio.NewScanner(strings.NewReader(sample)), &s)
	if !ok {
		t.Fatalf("valid sample must produce ok=true")
	}
	if s.readBytes != 4096 || s.writeBytes != 8192 || s.cancelledWrite != 1234 {
		t.Errorf("unexpected parsed counters: %+v", s)
	}
}
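// For orientation, the gate those negative tests force fits in a few lines.
// This is a sketch only, not the production parseProcIOInto: the "Sketch"
// names, the reduced key set, and the strconv-based parsing are assumptions
// (the real procIOSample has more fields). Assumes bufio, strconv, strings.
type procIOSampleSketch struct {
	readBytes, writeBytes, cancelledWrite int64
}

func parseProcIOIntoSketch(sc *bufio.Scanner, s *procIOSampleSketch) bool {
	parsedAny := false
	for sc.Scan() {
		key, val, found := strings.Cut(sc.Text(), ":")
		if !found {
			continue
		}
		n, err := strconv.ParseInt(strings.TrimSpace(val), 10, 64)
		if err != nil {
			continue
		}
		switch key {
		case "read_bytes":
			s.readBytes = n
		case "write_bytes":
			s.writeBytes = n
		case "cancelled_write_bytes":
			s.cancelledWrite = n
		default:
			continue // unrecognised key: must NOT flip parsedAny
		}
		parsedAny = true // at least one recognised key was parsed
	}
	return parsedAny
}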
// readIngestorStatsParseCalls is incremented every time
// readIngestorIOSample performs a full json.Unmarshal of the stats file
// (i.e. cache miss). Used by the cache test below to assert that
// repeated calls within the same mtime+size window do NOT re-decode.
//
// The hook must be wired up in perf_io.go (Carmack must-fix #2).
// var readIngestorStatsParseCalls atomic.Int64 — defined in perf_io.go

// TestReadIngestorIOSample_CachesByMtimeSize — Carmack must-fix #2: the
// underlying file is byte-stable between 1Hz writes; multiple readers
// (every browser tab on the Perf page) re-decode for nothing. Cache the
// last decoded sample keyed by (mtime, size); only re-parse when either
// changes.
func TestReadIngestorIOSample_CachesByMtimeSize(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	freshAt := time.Now().UTC().Format(time.RFC3339)
	stub := `{"sampledAt":"` + freshAt + `","tx_inserted":0,"backfillUpdates":{},"procIO":{"readBytesPerSec":1,"writeBytesPerSec":2,"cancelledWriteBytesPerSec":0,"syscallsRead":3,"syscallsWrite":4,"sampledAt":"` + freshAt + `"}}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	// Reset counter + cache.
	readIngestorStatsParseCalls.Store(0)
	resetIngestorIOCache()

	for i := 0; i < 5; i++ {
		got := readIngestorIOSample()
		if got == nil {
			t.Fatalf("call %d: expected non-nil, got nil", i)
		}
	}
	got := readIngestorStatsParseCalls.Load()
	if got != 1 {
		t.Errorf("expected 1 parse for 5 reads of byte-stable file, got %d", got)
	}
}

// TestReadIngestorIOSample_CacheInvalidatesOnMtimeChange — companion: as
// soon as the file changes (writer tick) the cache MUST invalidate.
func TestReadIngestorIOSample_CacheInvalidatesOnMtimeChange(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	write := func() {
		freshAt := time.Now().UTC().Format(time.RFC3339)
		stub := `{"sampledAt":"` + freshAt + `","tx_inserted":0,"backfillUpdates":{},"procIO":{"readBytesPerSec":1,"writeBytesPerSec":2,"cancelledWriteBytesPerSec":0,"syscallsRead":3,"syscallsWrite":4,"sampledAt":"` + freshAt + `"}}`
		if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
			t.Fatal(err)
		}
	}
	write()
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)
	readIngestorStatsParseCalls.Store(0)
	resetIngestorIOCache()

	_ = readIngestorIOSample()
	// Bump mtime by writing again with a new timestamp; the sleep ensures
	// the FS mtime advances (nominally 1ns resolution on Linux, but be safe).
	time.Sleep(10 * time.Millisecond)
	// Touch with a different size by rewriting fresh content.
	write()
	// Force a clearly different mtime by setting it explicitly.
	future := time.Now().Add(2 * time.Second)
	if err := os.Chtimes(statsPath, future, future); err != nil {
		t.Fatal(err)
	}
	_ = readIngestorIOSample()
	got := readIngestorStatsParseCalls.Load()
	if got != 2 {
		t.Errorf("expected 2 parses across an mtime change, got %d", got)
	}
}

// TestPerfIOEndpoint_IngestorTimestampMatchesSnapshot was removed: it
// was a hand-flipped-bool tautology. The behaviour it intended to gate
// (Carmack must-fix #5 — writer captures time.Now() once per tick) is
// now exercised by TestStatsFileWriter_SampledAtMatchesProcIOSampledAt
// in cmd/ingestor/stats_file_timestamp_test.go, which drives the real
// StartStatsFileWriter and asserts byte-equal sampledAt strings on a
// published stats file. Removed per Kent Beck Gate review
// pullrequestreview-4254521304.
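// A minimal sketch of the (mtime, size) cache the two tests above gate, not
// the perf_io.go implementation: every name here is illustrative, and
// ingestorStatsSketch stands in for the real decoded stats type. Assumes
// encoding/json, os, sync, sync/atomic, time.
type ingestorStatsSketch struct {
	SampledAt string `json:"sampledAt"`
}

var (
	ioCacheMu     sync.Mutex
	ioCacheMtime  time.Time
	ioCacheSize   int64
	ioCacheSample *ingestorStatsSketch
	parseCalls    atomic.Int64 // counterpart of readIngestorStatsParseCalls
)

func readIngestorIOSampleSketch(path string) *ingestorStatsSketch {
	fi, err := os.Stat(path)
	if err != nil {
		return nil // missing file → nil sample, no ingestor block
	}
	ioCacheMu.Lock()
	defer ioCacheMu.Unlock()
	if ioCacheSample != nil && fi.ModTime().Equal(ioCacheMtime) && fi.Size() == ioCacheSize {
		return ioCacheSample // byte-stable file: skip json.Unmarshal entirely
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil
	}
	parseCalls.Add(1) // the counter the cache tests assert on
	var s ingestorStatsSketch
	if json.Unmarshal(raw, &s) != nil {
		return nil // malformed JSON → nil sample
	}
	ioCacheMtime, ioCacheSize, ioCacheSample = fi.ModTime(), fi.Size(), &s
	return ioCacheSample
}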
@@ -0,0 +1,106 @@
package main

import (
	"bufio"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

// TestParseProcIO_CancelledWriteBytes verifies the parser populates
// cancelled_write_bytes from a synthetic /proc/self/io string. Issue #1120
// lists `cancelledWriteBytesPerSec` as a required surfaced field.
func TestParseProcIO_CancelledWriteBytes(t *testing.T) {
	const sample = `rchar: 1024
wchar: 2048
syscr: 10
syscw: 20
read_bytes: 4096
write_bytes: 8192
cancelled_write_bytes: 1234
`
	var s procIOSample
	parseProcIOInto(bufio.NewScanner(strings.NewReader(sample)), &s)
	if s.cancelledWrite != 1234 {
		t.Errorf("expected cancelledWrite=1234, got %d", s.cancelledWrite)
	}
	if s.readBytes != 4096 {
		t.Errorf("expected readBytes=4096, got %d", s.readBytes)
	}
}

// TestPerfIOEndpoint_ExposesCancelledWriteBytes asserts the JSON payload
// includes the cancelledWriteBytesPerSec field — this was the BLOCKER B1
// gap from PR #1123 review.
func TestPerfIOEndpoint_ExposesCancelledWriteBytes(t *testing.T) {
	_, router := setupTestServer(t)

	req := httptest.NewRequest("GET", "/api/perf/io", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	if _, ok := body["cancelledWriteBytesPerSec"]; !ok {
		t.Errorf("missing field cancelledWriteBytesPerSec; got: %v", body)
	}
}

// TestPerfIOEndpoint_ExposesIngestorBlock writes a stub ingestor stats file
// containing a procIO block and asserts /api/perf/io surfaces it under
// `ingestor`. Issue #1120: "Both ingestor and server."
func TestPerfIOEndpoint_ExposesIngestorBlock(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	// Use a fresh sampledAt — the GREEN commit added a freshness guard
	// (#1167 must-fix #1) that drops snapshots older than ~5s. A fixed
	// date string would now incorrectly exercise the stale path.
	freshAt := time.Now().UTC().Format(time.RFC3339)
	stub := `{
  "sampledAt": "` + freshAt + `",
  "tx_inserted": 42,
  "obs_inserted": 1,
  "backfillUpdates": {},
  "procIO": {
    "readBytesPerSec": 100,
    "writeBytesPerSec": 200,
    "cancelledWriteBytesPerSec": 50,
    "syscallsRead": 5,
    "syscallsWrite": 6,
    "sampledAt": "` + freshAt + `"
  }
}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/perf/io", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	ing, ok := body["ingestor"].(map[string]interface{})
	if !ok {
		t.Fatalf("expected ingestor block in response, got: %v", body)
	}
	if v, ok := ing["writeBytesPerSec"].(float64); !ok || v != 200 {
		t.Errorf("expected ingestor.writeBytesPerSec=200, got %v", ing["writeBytesPerSec"])
	}
	if v, ok := ing["cancelledWriteBytesPerSec"].(float64); !ok || v != 50 {
		t.Errorf("expected ingestor.cancelledWriteBytesPerSec=50, got %v", ing["cancelledWriteBytesPerSec"])
	}
}
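// For orientation, a /api/perf/io response satisfying the two endpoint tests
// above could look like this. The shape is inferred from the assertions; the
// values are illustrative, not captured output:
//
//	{
//	  "readBytesPerSec": 100,
//	  "writeBytesPerSec": 200,
//	  "cancelledWriteBytesPerSec": 50,
//	  "syscallsRead": 5,
//	  "syscallsWrite": 6,
//	  "ingestor": {
//	    "readBytesPerSec": 100,
//	    "writeBytesPerSec": 200,
//	    "cancelledWriteBytesPerSec": 50,
//	    "syscallsRead": 5,
//	    "syscallsWrite": 6
//	  }
//	}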
@@ -0,0 +1,125 @@
package main

import (
	"encoding/json"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"
	"time"
)

// TestReadIngestorIOSample_FileMissing — negative path: a missing stats file
// must produce a nil sample (and the /api/perf/io endpoint must omit the
// ingestor block). Issue #1167 must-fix #4.
func TestReadIngestorIOSample_FileMissing(t *testing.T) {
	t.Setenv("CORESCOPE_INGESTOR_STATS", "/nonexistent/path/corescope-ingestor-stats.json")
	if got := readIngestorIOSample(); got != nil {
		t.Fatalf("expected nil for missing file, got %+v", got)
	}

	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/perf/io", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	if _, ok := body["ingestor"]; ok {
		t.Errorf("expected NO ingestor block when stats file missing, got: %v", body["ingestor"])
	}
}

// TestReadIngestorIOSample_Unparseable — negative path: malformed JSON must
// produce nil. Issue #1167 must-fix #4.
func TestReadIngestorIOSample_Unparseable(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	if err := os.WriteFile(statsPath, []byte("{not json"), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	if got := readIngestorIOSample(); got != nil {
		t.Fatalf("expected nil for unparseable JSON, got %+v", got)
	}
}

// TestReadIngestorIOSample_StaleBeyondThreshold — freshness guard: a snapshot
// whose sampledAt is older than the staleness threshold (5× the default 1s
// writer interval = 5s; we use a 5-minute-old snapshot here for clear margin)
// MUST be dropped, not served as live ingestor I/O. Issue #1167 must-fix #1.
func TestReadIngestorIOSample_StaleBeyondThreshold(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	staleAt := time.Now().UTC().Add(-5 * time.Minute).Format(time.RFC3339)
	stub := `{
  "sampledAt": "` + staleAt + `",
  "tx_inserted": 0,
  "backfillUpdates": {},
  "procIO": {
    "readBytesPerSec": 100,
    "writeBytesPerSec": 200,
    "cancelledWriteBytesPerSec": 0,
    "syscallsRead": 5,
    "syscallsWrite": 6,
    "sampledAt": "` + staleAt + `"
  }
}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	if got := readIngestorIOSample(); got != nil {
		t.Fatalf("expected nil for stale snapshot (>threshold), got %+v", got)
	}

	// And the endpoint must omit `ingestor` entirely.
	_, router := setupTestServer(t)
	req := httptest.NewRequest("GET", "/api/perf/io", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	if _, ok := body["ingestor"]; ok {
		t.Errorf("stale ingestor must be dropped, got: %v", body["ingestor"])
	}
}

// TestReadIngestorIOSample_FreshIsServed — positive path: a snapshot whose
// sampledAt is younger than the threshold MUST still be served. Companion to
// the freshness-guard test above. Issue #1167 must-fix #1.
func TestReadIngestorIOSample_FreshIsServed(t *testing.T) {
	dir := t.TempDir()
	statsPath := filepath.Join(dir, "ingestor-stats.json")
	freshAt := time.Now().UTC().Format(time.RFC3339)
	stub := `{
  "sampledAt": "` + freshAt + `",
  "tx_inserted": 0,
  "backfillUpdates": {},
  "procIO": {
    "readBytesPerSec": 100,
    "writeBytesPerSec": 200,
    "cancelledWriteBytesPerSec": 0,
    "syscallsRead": 5,
    "syscallsWrite": 6,
    "sampledAt": "` + freshAt + `"
  }
}`
	if err := os.WriteFile(statsPath, []byte(stub), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Setenv("CORESCOPE_INGESTOR_STATS", statsPath)

	got := readIngestorIOSample()
	if got == nil {
		t.Fatalf("expected non-nil for fresh snapshot, got nil")
	}
	if got.WriteBytesPerSec != 200 {
		t.Errorf("expected writeBytesPerSec=200, got %v", got.WriteBytesPerSec)
	}
}
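// Sketch of the freshness check the stale/fresh pair above forces; run after
// decode, before returning the sample. The 5s constant mirrors "5× the
// default 1s writer interval" described above, but the production constant
// and function name are assumptions.
const maxStatsAgeSketch = 5 * time.Second

func isFreshSketch(sampledAt string, now time.Time) bool {
	t, err := time.Parse(time.RFC3339, sampledAt)
	if err != nil {
		return false // unparseable sampledAt: treat as stale, serve nothing
	}
	return now.Sub(t) <= maxStatsAgeSketch
}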
@@ -0,0 +1,96 @@
package main

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"testing"
)

func TestPerfIOEndpoint_ReturnsValidJSON(t *testing.T) {
	_, router := setupTestServer(t)

	req := httptest.NewRequest("GET", "/api/perf/io", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	for _, field := range []string{"readBytesPerSec", "writeBytesPerSec", "syscallsRead", "syscallsWrite"} {
		if _, ok := body[field]; !ok {
			t.Errorf("missing field %q", field)
		}
	}

	// /proc/self/io only exists on Linux. When absent (e.g. some test
	// containers) we still expect well-formed JSON but skip the non-zero
	// delta assertion.
	if _, err := os.Stat("/proc/self/io"); err != nil {
		t.Skip("skip non-zero rate assertion: /proc/self/io unavailable")
	}

	// Drive a second request so the delta-tracker emits a non-zero rate;
	// handling the request itself generates a small read-bytes signal
	// between the two reads.
	req2 := httptest.NewRequest("GET", "/api/perf/io", nil)
	w2 := httptest.NewRecorder()
	router.ServeHTTP(w2, req2)
	var body2 map[string]interface{}
	_ = json.Unmarshal(w2.Body.Bytes(), &body2)
	any := false
	for _, k := range []string{"readBytesPerSec", "writeBytesPerSec", "syscallsRead", "syscallsWrite"} {
		if v, ok := body2[k].(float64); ok && v > 0 {
			any = true
			break
		}
	}
	if !any {
		t.Errorf("expected at least one non-zero rate after second sample, got %v", body2)
	}
}

func TestPerfSqliteEndpoint_ReturnsValidJSON(t *testing.T) {
	_, router := setupTestServer(t)

	req := httptest.NewRequest("GET", "/api/perf/sqlite", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	var body map[string]interface{}
	if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}
	for _, field := range []string{"walSize", "pageCount", "pageSize", "cacheHitRate"} {
		if _, ok := body[field]; !ok {
			t.Errorf("missing field %q", field)
		}
	}
	// pageSize must be > 0 for any open SQLite DB.
	if v, ok := body["pageSize"].(float64); !ok || v <= 0 {
		t.Errorf("expected pageSize > 0, got %v", body["pageSize"])
	}
}

func TestPerfWriteSourcesEndpoint_ReturnsSources(t *testing.T) {
	_, router := setupTestServer(t)

	req := httptest.NewRequest("GET", "/api/perf/write-sources", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	body := w.Body.String()
	if !strings.Contains(body, "sources") {
		t.Errorf("response missing 'sources' key: %s", body)
	}
}
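// A plausible way the sqlite test's pageCount/pageSize could be sourced
// (assumption only; the real handler is not part of this diff): SQLite
// exposes both as pragmas, queryable through database/sql. walSize would
// come from os.Stat on the "<db>-wal" sidecar file.
func sqlitePerfSketch(conn *sql.DB) (pageCount, pageSize int64, err error) {
	if err = conn.QueryRow("PRAGMA page_count").Scan(&pageCount); err != nil {
		return 0, 0, err
	}
	if err = conn.QueryRow("PRAGMA page_size").Scan(&pageSize); err != nil {
		return 0, 0, err
	}
	return pageCount, pageSize, nil
}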
@@ -0,0 +1,41 @@
package main

import (
	"testing"
)

// Issue #770: the region filter dropdown's "All" option was being sent to the
// backend as ?region=All. The backend then tried to match observers with IATA
// code "ALL", which never exists, producing an empty channel/packet list.
//
// "All" / "ALL" / "all" / "" must all be treated as "no region filter".
func TestNormalizeRegionCodes_AllIsNoFilter(t *testing.T) {
	cases := []struct {
		name string
		in   string
	}{
		{"empty", ""},
		{"literal All (frontend dropdown label)", "All"},
		{"upper ALL", "ALL"},
		{"lower all", "all"},
		{"All with whitespace", " All "},
		{"All in csv with empty siblings", "All,"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := normalizeRegionCodes(tc.in)
			if got != nil {
				t.Errorf("normalizeRegionCodes(%q) = %v, want nil (no filter)", tc.in, got)
			}
		})
	}
}

// Real region codes must still pass through unchanged (case-folded to upper).
// This locks in that the "All" handling does not regress legitimate filters.
func TestNormalizeRegionCodes_RealCodesPreserved(t *testing.T) {
	got := normalizeRegionCodes("sjc,PDX")
	if len(got) != 2 || got[0] != "SJC" || got[1] != "PDX" {
		t.Errorf("normalizeRegionCodes(\"sjc,PDX\") = %v, want [SJC PDX]", got)
	}
}
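// A sketch consistent with both tests above (not the production function,
// which may handle more separators); assumes strings:
func normalizeRegionCodesSketch(raw string) []string {
	var out []string
	for _, part := range strings.Split(raw, ",") {
		code := strings.ToUpper(strings.TrimSpace(part))
		if code == "" || code == "ALL" {
			continue // "All" and empties mean "no filter", never a code
		}
		out = append(out, code)
	}
	return out // nil when nothing survives → no region filter applied
}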
@@ -0,0 +1,191 @@
package main

import (
	"strings"
	"time"
)

// RepeaterRelayInfo describes whether a repeater has been observed
// relaying traffic (appearing as a path hop in non-advert packets) and
// when. This is distinct from advert-based liveness (last_seen / last_heard),
// which only proves the repeater can transmit its own adverts.
//
// See issue #662.
type RepeaterRelayInfo struct {
	// LastRelayed is the ISO-8601 timestamp of the most recent non-advert
	// packet where this pubkey appeared as a relay hop. Empty if never.
	LastRelayed string `json:"lastRelayed,omitempty"`
	// RelayActive is true if LastRelayed falls within the configured
	// activity window (default 24h).
	RelayActive bool `json:"relayActive"`
	// WindowHours is the active-window threshold actually used.
	WindowHours float64 `json:"windowHours"`
	// RelayCount1h is the count of distinct non-advert packets where this
	// pubkey appeared as a relay hop in the last 1 hour.
	RelayCount1h int `json:"relayCount1h"`
	// RelayCount24h is the count of distinct non-advert packets where this
	// pubkey appeared as a relay hop in the last 24 hours.
	RelayCount24h int `json:"relayCount24h"`
}

// payloadTypeAdvert is the MeshCore payload type for ADVERT packets.
// See firmware/src/Mesh.h. Adverts are NOT considered relay activity:
// a repeater that only sends adverts proves it is alive, not that it
// is forwarding traffic for other nodes.
const payloadTypeAdvert = 4

// parseRelayTS attempts to parse a packet first-seen timestamp using the
// formats CoreScope writes in practice. Returns zero time and false on
// failure. Accepted (in order):
//   - RFC3339Nano — Go's default UTC marshal output
//   - RFC3339 — second-precision ISO-8601 with offset
//   - "2006-01-02T15:04:05.000Z" — millisecond-precision Z form used by ingest
func parseRelayTS(ts string) (time.Time, bool) {
	if ts == "" {
		return time.Time{}, false
	}
	if t, err := time.Parse(time.RFC3339Nano, ts); err == nil {
		return t, true
	}
	if t, err := time.Parse(time.RFC3339, ts); err == nil {
		return t, true
	}
	if t, err := time.Parse("2006-01-02T15:04:05.000Z", ts); err == nil {
		return t, true
	}
	return time.Time{}, false
}

// GetRepeaterRelayInfo returns relay-activity information for a node by
// scanning the byPathHop index for non-advert packets that name the
// pubkey as a hop. It computes the most recent appearance timestamp,
// 1h/24h hop counts, and whether the latest appearance falls within
// windowHours.
//
// Cost: O(N) over the indexed entries for `pubkey`. The byPathHop index
// is bounded by store eviction; on real data this is small per-node.
//
// Note on self-as-source: byPathHop is keyed by every hop in a packet's
// resolved path, including the originator. For ADVERT packets that's the
// node itself, which the payloadTypeAdvert check below filters out.
// For non-advert packets a node "originates" rather than "relays" only
// when it is the source; we don't currently have a clean signal for that
// distinction, so the count here is *path-hop appearances in non-advert
// packets*. In practice for a repeater nearly all such appearances are
// relay hops (the firmware doesn't originate user traffic), so this is
// the right approximation for issue #662.
func (s *PacketStore) GetRepeaterRelayInfo(pubkey string, windowHours float64) RepeaterRelayInfo {
	info := RepeaterRelayInfo{WindowHours: windowHours}
	if pubkey == "" {
		return info
	}
	key := strings.ToLower(pubkey)

	s.mu.RLock()
	// byPathHop is keyed by both the full resolved pubkey AND the raw 1-byte
	// hop prefix (e.g. "a3"). Many ingested non-advert packets only carry the
	// raw hop on the wire — resolution to the full pubkey happens later
	// via neighbor affinity. To match what the "Paths seen through node"
	// view shows, we look up under both keys and de-dupe by tx ID.
	//
	// The 1-byte prefix lookup CAN over-count when multiple nodes share
	// the same first byte. This trades a possible over-count for clearly
	// false zeros (issue #662). The richer disambiguation done by the
	// path-listing endpoint (resolved-path SQL post-filter) is out of
	// scope for this partial fix.
	txList := s.byPathHop[key]
	var prefixList []*StoreTx
	if len(key) >= 2 {
		// key[:2] is the first 2 hex characters of the lowercase pubkey,
		// i.e. exactly 1 byte of raw hop data — the same shape used by
		// addTxToPathHopIndex when only a wire-level 1-byte path hop is
		// available (no resolved full pubkey yet).
		prefix := key[:2]
		if prefix != key {
			prefixList = s.byPathHop[prefix]
		}
	}
	// Copy only the timestamps + payload types we need so we can release
	// the read lock before doing the parsing/compare work below.
	//
	// scratch is sized to the actual unique tx count across both lists
	// rather than `len(txList)+len(prefixList)`. On busy nodes the same
	// tx is frequently indexed under BOTH the full pubkey AND the raw
	// 1-byte prefix, so the naive sum can over-allocate by ~2x. We do a
	// quick ID-set pass to get the exact size before allocating.
	type entry struct {
		ts string
		pt int
	}
	uniq := make(map[int]struct{}, len(txList)+len(prefixList))
	for _, tx := range txList {
		if tx != nil {
			uniq[tx.ID] = struct{}{}
		}
	}
	for _, tx := range prefixList {
		if tx != nil {
			uniq[tx.ID] = struct{}{}
		}
	}
	scratch := make([]entry, 0, len(uniq))
	seen := make(map[int]bool, len(uniq))
	collect := func(list []*StoreTx) {
		for _, tx := range list {
			if tx == nil {
				continue
			}
			if seen[tx.ID] {
				continue
			}
			seen[tx.ID] = true
			pt := -1
			if tx.PayloadType != nil {
				pt = *tx.PayloadType
			}
			scratch = append(scratch, entry{ts: tx.FirstSeen, pt: pt})
		}
	}
	collect(txList)
	collect(prefixList)
	s.mu.RUnlock()

	now := time.Now().UTC()
	cutoff1h := now.Add(-1 * time.Hour)
	cutoff24h := now.Add(-24 * time.Hour)

	var latest time.Time
	var latestRaw string
	for _, e := range scratch {
		// Self-originated adverts are not relay activity (see header comment).
		if e.pt == payloadTypeAdvert {
			continue
		}
		t, ok := parseRelayTS(e.ts)
		if !ok {
			continue
		}
		if t.After(latest) {
			latest = t
			latestRaw = e.ts
		}
		if t.After(cutoff24h) {
			info.RelayCount24h++
			if t.After(cutoff1h) {
				info.RelayCount1h++
			}
		}
	}
	if latestRaw == "" {
		return info
	}
	info.LastRelayed = latestRaw

	if windowHours > 0 {
		cutoff := now.Add(-time.Duration(windowHours * float64(time.Hour)))
		if latest.After(cutoff) {
			info.RelayActive = true
		}
	}
	return info
}
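// Illustrative call site (not from the repo; the handleNodes/handleNodeDetail
// enrichment later in this diff does the equivalent with the configured
// window):
//
//	info := store.GetRepeaterRelayInfo("aabbccdd11223344", 24)
//	if info.RelayActive {
//		log.Printf("relayed %d pkts in 24h, last at %s",
//			info.RelayCount24h, info.LastRelayed)
//	}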
@@ -0,0 +1,263 @@
package main

import (
	"testing"
	"time"
)

// TestRepeaterRelayActivity_Active verifies that a repeater whose pubkey
// appears as a relay hop in a recent (non-advert) packet is reported with
// a non-zero lastRelayed timestamp and relayActive=true.
func TestRepeaterRelayActivity_Active(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "aabbccdd11223344"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepActive", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	// A non-advert packet (payload_type=1, TXT_MSG) with the repeater pubkey
	// indexed as a path hop. Index by lowercase pubkey directly to mirror
	// the resolved-path entries that decode-window writes.
	pt := 1
	relayed := &StoreTx{
		RawHex:      "0100",
		PayloadType: &pt,
		PathJSON:    `["aa"]`,
		FirstSeen:   recentTS(2),
	}
	store.mu.Lock()
	relayed.ID = len(store.packets) + 1
	relayed.Hash = "test-relay-1"
	store.packets = append(store.packets, relayed)
	store.byHash[relayed.Hash] = relayed
	store.byTxID[relayed.ID] = relayed
	store.byPathHop[pubkey] = append(store.byPathHop[pubkey], relayed)
	store.mu.Unlock()

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.LastRelayed == "" {
		t.Fatalf("expected non-empty LastRelayed for active relayer, got empty (RelayActive=%v)", info.RelayActive)
	}
	if !info.RelayActive {
		t.Errorf("expected RelayActive=true within 24h window, got false (LastRelayed=%s)", info.LastRelayed)
	}
	if info.RelayCount1h != 0 {
		t.Errorf("expected RelayCount1h=0 (relay was 2h ago, outside 1h window), got %d", info.RelayCount1h)
	}
	if info.RelayCount24h != 1 {
		t.Errorf("expected RelayCount24h=1 (relay was 2h ago, inside 24h window), got %d", info.RelayCount24h)
	}
}

// TestRepeaterRelayActivity_Idle verifies that a repeater whose pubkey
// has not appeared as a relay hop reports an empty LastRelayed and
// relayActive=false.
func TestRepeaterRelayActivity_Idle(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "ccddeeff55667788"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepIdle", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.LastRelayed != "" {
		t.Errorf("expected empty LastRelayed for idle repeater, got %q", info.LastRelayed)
	}
	if info.RelayActive {
		t.Errorf("expected RelayActive=false for idle repeater, got true")
	}
	if info.RelayCount1h != 0 || info.RelayCount24h != 0 {
		t.Errorf("expected zero relay counts for idle repeater, got 1h=%d 24h=%d", info.RelayCount1h, info.RelayCount24h)
	}
}

// TestRepeaterRelayActivity_Stale verifies that a repeater whose only
// relay-hop appearances are older than the configured window reports
// a non-empty LastRelayed but relayActive=false.
func TestRepeaterRelayActivity_Stale(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "1122334455667788"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepStale", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	pt := 1
	staleTS := time.Now().UTC().Add(-48 * time.Hour).Format("2006-01-02T15:04:05.000Z")
	old := &StoreTx{
		RawHex:      "0100",
		PayloadType: &pt,
		PathJSON:    `["11"]`,
		FirstSeen:   staleTS,
	}
	store.mu.Lock()
	old.ID = len(store.packets) + 1
	old.Hash = "test-relay-stale"
	store.packets = append(store.packets, old)
	store.byHash[old.Hash] = old
	store.byTxID[old.ID] = old
	store.byPathHop[pubkey] = append(store.byPathHop[pubkey], old)
	store.mu.Unlock()

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.LastRelayed != staleTS {
		t.Errorf("expected LastRelayed=%q (stale ts), got %q", staleTS, info.LastRelayed)
	}
	if info.RelayActive {
		t.Errorf("expected RelayActive=false for relay older than window, got true")
	}
	if info.RelayCount1h != 0 || info.RelayCount24h != 0 {
		t.Errorf("expected zero relay counts for stale (>24h) repeater, got 1h=%d 24h=%d", info.RelayCount1h, info.RelayCount24h)
	}
}

// TestRepeaterRelayActivity_IgnoresAdverts verifies that adverts originated
// by the repeater itself (payload_type=4) are NOT counted as relay activity —
// adverts demonstrate liveness, not relaying.
func TestRepeaterRelayActivity_IgnoresAdverts(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "deadbeef00000001"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepAdvertOnly", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	// Self-advert with the repeater as its own first hop. Should NOT count.
	pt := 4
	adv := &StoreTx{
		RawHex:      "0140de",
		PayloadType: &pt,
		PathJSON:    `["de"]`,
		FirstSeen:   recentTS(2),
	}
	store.mu.Lock()
	adv.ID = len(store.packets) + 1
	adv.Hash = "test-advert-1"
	store.packets = append(store.packets, adv)
	store.byHash[adv.Hash] = adv
	store.byTxID[adv.ID] = adv
	store.byPathHop[pubkey] = append(store.byPathHop[pubkey], adv)
	store.mu.Unlock()

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.LastRelayed != "" {
		t.Errorf("expected empty LastRelayed (adverts ignored), got %q", info.LastRelayed)
	}
	if info.RelayActive {
		t.Errorf("expected RelayActive=false (adverts ignored), got true")
	}
	if info.RelayCount1h != 0 || info.RelayCount24h != 0 {
		t.Errorf("expected zero relay counts (adverts ignored), got 1h=%d 24h=%d", info.RelayCount1h, info.RelayCount24h)
	}
}

// TestRepeaterRelayActivity_PrefixHop verifies that GetRepeaterRelayInfo
// counts a non-advert packet whose path contains only the 1-byte raw hop
// prefix matching the target node (not the full resolved pubkey).
//
// Reality on prod/staging: many ingested packets only carry raw 1-byte
// path hops (e.g. ["a3"] from the wire) — resolution to a full pubkey
// happens later via neighbor affinity for the "Paths seen through node"
// view. The byPathHop index is populated under BOTH keys (raw hop AND
// resolved pubkey), but GetRepeaterRelayInfo only looked up the full
// pubkey, missing all raw-hop-only entries. This was the cause of the
// "never observed as relay hop" claim on nodes that clearly have paths
// shown through them. See https://analyzer-stg.00id.net/#/nodes/<pk>.
func TestRepeaterRelayActivity_PrefixHop(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "a36a21290d9c25a158130fe7c489541210d5f09f25fab997db5e942fb7680510"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepPrefix", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	// Non-advert packet with a single raw 1-byte hop matching the target
	// pubkey's first byte ("a3"). Index it the way addTxToPathHopIndex
	// does — under the raw hop key only, not the full pubkey.
	pt := 1
	tx := &StoreTx{
		RawHex:      "0100",
		PayloadType: &pt,
		PathJSON:    `["a3"]`,
		FirstSeen:   recentTS(2),
	}
	store.mu.Lock()
	tx.ID = len(store.packets) + 1
	tx.Hash = "test-relay-prefix-1"
	store.packets = append(store.packets, tx)
	store.byHash[tx.Hash] = tx
	store.byTxID[tx.ID] = tx
	addTxToPathHopIndex(store.byPathHop, tx)
	store.mu.Unlock()

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.RelayCount24h < 1 {
		t.Fatalf("expected RelayCount24h>=1 for node with prefix-matched hop in path, got %d (LastRelayed=%q)",
			info.RelayCount24h, info.LastRelayed)
	}
	if info.LastRelayed == "" {
		t.Errorf("expected non-empty LastRelayed when prefix hop matched, got empty")
	}
	if !info.RelayActive {
		t.Errorf("expected RelayActive=true within 24h window, got false (LastRelayed=%s)", info.LastRelayed)
	}
}

// TestRepeaterRelayActivity_DedupAcrossPrefixAndFullKey verifies that when
// the SAME packet is indexed in byPathHop under BOTH the full pubkey AND
// the raw 1-byte prefix, GetRepeaterRelayInfo counts it exactly once. This
// gates the `seen[tx.ID]` dedup map: without it, hop counts would double
// for any tx that resolved-path indexing recorded under both keys.
func TestRepeaterRelayActivity_DedupAcrossPrefixAndFullKey(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "a36a21290d9c25a158130fe7c489541210d5f09f25fab997db5e942fb7680510"
	db.conn.Exec("INSERT INTO nodes (public_key, name, role, last_seen) VALUES (?, ?, ?, ?)",
		pubkey, "RepDedup", "repeater", recentTS(1))

	store := NewPacketStore(db, nil)

	pt := 1
	tx := &StoreTx{
		RawHex:      "0100",
		PayloadType: &pt,
		PathJSON:    `["a3"]`,
		FirstSeen:   recentTS(2),
	}
	store.mu.Lock()
	tx.ID = len(store.packets) + 1
	tx.Hash = "test-relay-dedup-1"
	store.packets = append(store.packets, tx)
	store.byHash[tx.Hash] = tx
	store.byTxID[tx.ID] = tx
	// Index under BOTH the full pubkey AND the raw 1-byte prefix — this
	// is the exact double-index case that occurs when wire ingest records
	// the raw hop and a later resolution pass also records the full key.
	store.byPathHop[pubkey] = append(store.byPathHop[pubkey], tx)
	store.byPathHop[pubkey[:2]] = append(store.byPathHop[pubkey[:2]], tx)
	store.mu.Unlock()

	info := store.GetRepeaterRelayInfo(pubkey, 24)
	if info.RelayCount24h != 1 {
		t.Fatalf("expected RelayCount24h=1 (dedup across full+prefix indexing), got %d", info.RelayCount24h)
	}
	if info.RelayCount1h != 0 {
		t.Errorf("expected RelayCount1h=0 (relay was 2h ago, outside 1h window), got %d", info.RelayCount1h)
	}
	if !info.RelayActive {
		t.Errorf("expected RelayActive=true, got false (LastRelayed=%s)", info.LastRelayed)
	}
}
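// Sketch of what the tests above assume addTxToPathHopIndex does: index the
// tx under every hop string in its PathJSON, lowercased. This is an
// assumption for illustration (the real function is not in this diff) and it
// elides the resolved-pubkey double-indexing path. Assumes encoding/json and
// strings.
func addTxToPathHopIndexSketch(idx map[string][]*StoreTx, tx *StoreTx) {
	var hops []string
	if json.Unmarshal([]byte(tx.PathJSON), &hops) != nil {
		return // unparseable path: nothing to index
	}
	for _, h := range hops {
		k := strings.ToLower(h)
		idx[k] = append(idx[k], tx)
	}
}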
@@ -0,0 +1,64 @@
package main

import "strings"

// GetRepeaterUsefulnessScore returns a 0..1 score representing what
// fraction of non-advert traffic in the store passes through this
// repeater as a relay hop. Issue #672 (Traffic axis only — bridge,
// coverage, and redundancy axes are deferred to follow-up work).
//
// Numerator: count of non-advert StoreTx entries indexed under
// pubkey in byPathHop.
// Denominator: total non-advert StoreTx entries in the store
// (sum of byPayloadType for all keys != payloadTypeAdvert).
//
// Returns 0 when there is no non-advert traffic, the pubkey is empty,
// or the repeater never appears as a relay hop. Scores are clamped to
// [0,1] for defensive bounds.
//
// Cost: O(N) over byPayloadType keys (typically <20) plus the per-hop
// slice for pubkey. Cheap relative to the per-request enrichment loop
// in handleNodes; if it ever shows up in profiles, the denominator can
// be memoized off store invalidation.
func (s *PacketStore) GetRepeaterUsefulnessScore(pubkey string) float64 {
	if pubkey == "" {
		return 0
	}
	key := strings.ToLower(pubkey)

	s.mu.RLock()
	defer s.mu.RUnlock()

	// Denominator: total non-advert packets.
	totalNonAdvert := 0
	for pt, list := range s.byPayloadType {
		if pt == payloadTypeAdvert {
			continue
		}
		totalNonAdvert += len(list)
	}
	if totalNonAdvert == 0 {
		return 0
	}

	// Numerator: this repeater's non-advert hop appearances.
	relayed := 0
	for _, tx := range s.byPathHop[key] {
		if tx == nil {
			continue
		}
		if tx.PayloadType != nil && *tx.PayloadType == payloadTypeAdvert {
			continue
		}
		relayed++
	}

	score := float64(relayed) / float64(totalNonAdvert)
	if score < 0 {
		return 0
	}
	if score > 1 {
		return 1
	}
	return score
}
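// Worked example of the score: with 4 non-advert packets in the store and
// the repeater appearing as a hop in exactly 1 of them, the score is
// 1/4 = 0.25; adding 100 adverts on top changes nothing, because both the
// numerator and the denominator skip payloadTypeAdvert. The tests in the
// next file gate exactly these two cases.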
@@ -0,0 +1,100 @@
package main

import (
	"testing"
)

// TestRepeaterUsefulness_BasicShare verifies that usefulness_score is
// relayed non-advert packets / total non-advert packets. With 1 of 4
// non-advert packets passing through the repeater, the score should be 0.25.
//
// Issue #672. We are intentionally implementing the *traffic share*
// dimension of the composite score from the issue body — bridge,
// coverage, redundancy are deferred to follow-up work. This is the
// "Traffic" axis of the table in #672.
func TestRepeaterUsefulness_BasicShare(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "aabbccdd11223344"
	store := NewPacketStore(db, nil)

	// 4 non-advert packets total in the last hour. The repeater appears in
	// the resolved path of exactly one of them.
	pt := 1
	for i := 0; i < 4; i++ {
		tx := &StoreTx{RawHex: "0100", PayloadType: &pt, FirstSeen: recentTS(0)}
		// Only the first packet has our repeater in its path.
		if i == 0 {
			store.mu.Lock()
			tx.ID = len(store.packets) + 1
			tx.Hash = "uf-hit"
			store.packets = append(store.packets, tx)
			store.byHash[tx.Hash] = tx
			store.byTxID[tx.ID] = tx
			store.byPayloadType[pt] = append(store.byPayloadType[pt], tx)
			store.byPathHop[pubkey] = append(store.byPathHop[pubkey], tx)
			store.mu.Unlock()
		} else {
			addTestPacket(store, tx)
		}
	}

	score := store.GetRepeaterUsefulnessScore(pubkey)
	// 1 relay / 4 total = 0.25
	if score < 0.24 || score > 0.26 {
		t.Errorf("expected usefulness ~0.25, got %f", score)
	}
}

// TestRepeaterUsefulness_NoTraffic verifies the score is 0 when there is
// no non-advert traffic to share.
func TestRepeaterUsefulness_NoTraffic(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()
	store := NewPacketStore(db, nil)
	score := store.GetRepeaterUsefulnessScore("deadbeefcafebabe")
	if score != 0 {
		t.Errorf("expected 0 for empty store, got %f", score)
	}
}

// TestRepeaterUsefulness_AdvertsExcluded verifies that ADVERT packets
// (payload_type=4) are excluded from both numerator and denominator —
// adverts don't count as forwarded traffic.
func TestRepeaterUsefulness_AdvertsExcluded(t *testing.T) {
	db := setupCapabilityTestDB(t)
	defer db.conn.Close()

	pubkey := "11aa22bb33cc44dd"
	store := NewPacketStore(db, nil)

	// 2 non-advert packets, both with our repeater in path → score = 1.0
	pt := 1
	for i := 0; i < 2; i++ {
		tx := &StoreTx{RawHex: "0100", PayloadType: &pt, FirstSeen: recentTS(0)}
		store.mu.Lock()
		tx.ID = len(store.packets) + 1
		tx.Hash = "uf-non-advert"
		if i == 1 {
			tx.Hash = "uf-non-advert-2"
		}
		store.packets = append(store.packets, tx)
		store.byHash[tx.Hash] = tx
		store.byTxID[tx.ID] = tx
		store.byPayloadType[pt] = append(store.byPayloadType[pt], tx)
		store.byPathHop[pubkey] = append(store.byPathHop[pubkey], tx)
		store.mu.Unlock()
	}
	// Add 100 adverts — these must be ignored.
	advertPT := payloadTypeAdvert
	for i := 0; i < 100; i++ {
		tx := &StoreTx{RawHex: "0400", PayloadType: &advertPT, FirstSeen: recentTS(0)}
		addTestPacket(store, tx)
	}

	score := store.GetRepeaterUsefulnessScore(pubkey)
	if score < 0.99 || score > 1.01 {
		t.Errorf("expected usefulness ~1.0 (adverts excluded), got %f", score)
	}
}
@@ -0,0 +1,133 @@
package main

import (
	"math"
	"net/http"
	"sort"
	"strings"
)

// RoleStats summarises one role's population and clock-skew posture.
type RoleStats struct {
	Role             string  `json:"role"`
	NodeCount        int     `json:"nodeCount"`
	WithSkew         int     `json:"withSkew"`
	MeanAbsSkewSec   float64 `json:"meanAbsSkewSec"`
	MedianAbsSkewSec float64 `json:"medianAbsSkewSec"`
	OkCount          int     `json:"okCount"`
	WarningCount     int     `json:"warningCount"`
	CriticalCount    int     `json:"criticalCount"`
	AbsurdCount      int     `json:"absurdCount"`
	NoClockCount     int     `json:"noClockCount"`
}

// RoleAnalyticsResponse is the payload returned by /api/analytics/roles.
type RoleAnalyticsResponse struct {
	TotalNodes int         `json:"totalNodes"`
	Roles      []RoleStats `json:"roles"`
}

// normalizeRole canonicalises a role string so empty/unknown roles bucket
// together and case differences don't fragment the distribution.
func normalizeRole(r string) string {
	r = strings.ToLower(strings.TrimSpace(r))
	if r == "" {
		return "unknown"
	}
	return r
}

// computeRoleAnalytics groups nodes by role and aggregates clock-skew per
// role. Pure function: takes the node roster and the per-pubkey skew map and
// returns the response — no store / lock dependencies, easy to unit test.
//
// `nodesByPubkey` lists every known node (pubkey → role). `skewByPubkey`
// is the subset of pubkeys that have clock-skew data with their severity and
// most-recent corrected skew (in seconds, signed — we take |x| for averages).
func computeRoleAnalytics(nodesByPubkey map[string]string, skewByPubkey map[string]*NodeClockSkew) RoleAnalyticsResponse {
	type bucket struct {
		stats    RoleStats
		absSkews []float64
	}
	buckets := make(map[string]*bucket)
	for pk, rawRole := range nodesByPubkey {
		role := normalizeRole(rawRole)
		b, ok := buckets[role]
		if !ok {
			b = &bucket{stats: RoleStats{Role: role}}
			buckets[role] = b
		}
		b.stats.NodeCount++
		cs, has := skewByPubkey[pk]
		if !has || cs == nil {
			continue
		}
		b.stats.WithSkew++
		abs := math.Abs(cs.RecentMedianSkewSec)
		if abs == 0 {
			abs = math.Abs(cs.LastSkewSec)
		}
		b.absSkews = append(b.absSkews, abs)
		switch cs.Severity {
		case SkewOK:
			b.stats.OkCount++
		case SkewWarning:
			b.stats.WarningCount++
		case SkewCritical:
			b.stats.CriticalCount++
		case SkewAbsurd:
			b.stats.AbsurdCount++
		case SkewNoClock:
			b.stats.NoClockCount++
		}
	}
	resp := RoleAnalyticsResponse{Roles: make([]RoleStats, 0, len(buckets))}
	for _, b := range buckets {
		if n := len(b.absSkews); n > 0 {
			sum := 0.0
			for _, v := range b.absSkews {
				sum += v
			}
			b.stats.MeanAbsSkewSec = round(sum/float64(n), 2)
			sorted := make([]float64, n)
			copy(sorted, b.absSkews)
			sort.Float64s(sorted)
			if n%2 == 1 {
				b.stats.MedianAbsSkewSec = round(sorted[n/2], 2)
			} else {
				b.stats.MedianAbsSkewSec = round((sorted[n/2-1]+sorted[n/2])/2, 2)
			}
		}
		resp.TotalNodes += b.stats.NodeCount
		resp.Roles = append(resp.Roles, b.stats)
	}
	// Sort: largest population first, then role name for stable output.
	sort.Slice(resp.Roles, func(i, j int) bool {
		if resp.Roles[i].NodeCount != resp.Roles[j].NodeCount {
			return resp.Roles[i].NodeCount > resp.Roles[j].NodeCount
		}
		return resp.Roles[i].Role < resp.Roles[j].Role
	})
	return resp
}

// handleAnalyticsRoles serves /api/analytics/roles.
func (s *Server) handleAnalyticsRoles(w http.ResponseWriter, r *http.Request) {
	if s.store == nil {
		writeJSON(w, RoleAnalyticsResponse{Roles: []RoleStats{}})
		return
	}
	nodes, _ := s.store.getCachedNodesAndPM()
	roles := make(map[string]string, len(nodes))
	for _, n := range nodes {
		roles[n.PublicKey] = n.Role
	}
	skewMap := make(map[string]*NodeClockSkew)
	for _, cs := range s.store.GetFleetClockSkew() {
		if cs == nil {
			continue
		}
		skewMap[cs.Pubkey] = cs
	}
	writeJSON(w, computeRoleAnalytics(roles, skewMap))
}
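// computeRoleAnalytics calls a round(x, places) helper that isn't shown in
// this hunk. A plausible definition matching the call sites (assumption; the
// real helper may differ), using the math import already present above:
//
//	func round(v float64, places int) float64 {
//		p := math.Pow(10, float64(places))
//		return math.Round(v*p) / p
//	}
//
// E.g. round(7610.0/3, 2) yields 2536.67, which is what the aggregation
// test below expects for MeanAbsSkewSec.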
@@ -0,0 +1,77 @@
package main

import (
	"testing"
)

// TestComputeRoleAnalytics_Distribution verifies that computeRoleAnalytics
// groups nodes by role, normalises empty/case-different roles, and sorts the
// output largest-population first. Asserts on the public RoleAnalyticsResponse
// shape so the bar is "behaviour", not "compiles".
func TestComputeRoleAnalytics_Distribution(t *testing.T) {
	nodes := map[string]string{
		"pk_a": "Repeater",
		"pk_b": "repeater",
		"pk_c": "companion",
		"pk_d": "",
		"pk_e": "ROOM_SERVER",
	}
	got := computeRoleAnalytics(nodes, nil)

	if got.TotalNodes != 5 {
		t.Fatalf("TotalNodes = %d, want 5", got.TotalNodes)
	}
	if len(got.Roles) != 4 {
		t.Fatalf("len(Roles) = %d, want 4 (repeater, companion, room_server, unknown), got %+v", len(got.Roles), got.Roles)
	}
	if got.Roles[0].Role != "repeater" || got.Roles[0].NodeCount != 2 {
		t.Errorf("Roles[0] = %+v, want {repeater,2}", got.Roles[0])
	}
	// Empty roles should bucket as "unknown".
	foundUnknown := false
	for _, r := range got.Roles {
		if r.Role == "unknown" {
			foundUnknown = true
			if r.NodeCount != 1 {
				t.Errorf("unknown bucket NodeCount = %d, want 1", r.NodeCount)
			}
		}
	}
	if !foundUnknown {
		t.Errorf("no 'unknown' bucket for empty roles in %+v", got.Roles)
	}
}

// TestComputeRoleAnalytics_SkewAggregation verifies per-role clock-skew
// aggregation: counts by severity, mean and median absolute skew.
func TestComputeRoleAnalytics_SkewAggregation(t *testing.T) {
	nodes := map[string]string{
		"pk_1": "repeater",
		"pk_2": "repeater",
		"pk_3": "repeater",
	}
	skews := map[string]*NodeClockSkew{
		"pk_1": {Pubkey: "pk_1", RecentMedianSkewSec: 10, Severity: SkewOK},
		"pk_2": {Pubkey: "pk_2", RecentMedianSkewSec: -400, Severity: SkewWarning},
		"pk_3": {Pubkey: "pk_3", RecentMedianSkewSec: 7200, Severity: SkewCritical},
	}
	got := computeRoleAnalytics(nodes, skews)
	if len(got.Roles) != 1 {
		t.Fatalf("len(Roles) = %d, want 1; got %+v", len(got.Roles), got.Roles)
	}
	r := got.Roles[0]
	if r.WithSkew != 3 {
		t.Errorf("WithSkew = %d, want 3", r.WithSkew)
	}
	if r.OkCount != 1 || r.WarningCount != 1 || r.CriticalCount != 1 {
		t.Errorf("severity counts = ok %d, warn %d, crit %d; want 1/1/1", r.OkCount, r.WarningCount, r.CriticalCount)
	}
	// mean(|10|, |−400|, |7200|) = 7610/3 ≈ 2536.67
	if r.MeanAbsSkewSec < 2536 || r.MeanAbsSkewSec > 2537 {
		t.Errorf("MeanAbsSkewSec = %v, want ~2536.67", r.MeanAbsSkewSec)
	}
	// median(10, 400, 7200) = 400
	if r.MedianAbsSkewSec != 400 {
		t.Errorf("MedianAbsSkewSec = %v, want 400", r.MedianAbsSkewSec)
	}
}
+86
-9
@@ -104,6 +104,9 @@ func (s *Server) getMemStats() runtime.MemStats {
|
||||
// RegisterRoutes sets up all HTTP routes on the given router.
|
||||
func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
s.router = r
|
||||
// CORS middleware (must run before route handlers)
|
||||
r.Use(s.corsMiddleware)
|
||||
|
||||
// Performance instrumentation middleware
|
||||
r.Use(s.perfMiddleware)
|
||||
|
||||
@@ -125,10 +128,14 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/health", s.handleHealth).Methods("GET")
|
||||
r.HandleFunc("/api/stats", s.handleStats).Methods("GET")
|
||||
r.HandleFunc("/api/perf", s.handlePerf).Methods("GET")
|
||||
r.HandleFunc("/api/perf/io", s.handlePerfIO).Methods("GET")
|
||||
r.HandleFunc("/api/perf/sqlite", s.handlePerfSqlite).Methods("GET")
|
||||
r.HandleFunc("/api/perf/write-sources", s.handlePerfWriteSources).Methods("GET")
|
||||
r.Handle("/api/perf/reset", s.requireAPIKey(http.HandlerFunc(s.handlePerfReset))).Methods("POST")
|
||||
r.Handle("/api/admin/prune", s.requireAPIKey(http.HandlerFunc(s.handleAdminPrune))).Methods("POST")
|
||||
r.Handle("/api/debug/affinity", s.requireAPIKey(http.HandlerFunc(s.handleDebugAffinity))).Methods("GET")
|
||||
r.Handle("/api/dropped-packets", s.requireAPIKey(http.HandlerFunc(s.handleDroppedPackets))).Methods("GET")
|
||||
r.Handle("/api/backup", s.requireAPIKey(http.HandlerFunc(s.handleBackup))).Methods("GET")
|
||||
|
||||
// Packet endpoints
|
||||
r.HandleFunc("/api/packets/observations", s.handleBatchObservations).Methods("POST")
|
||||
@@ -147,6 +154,7 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/nodes/{pubkey}/health", s.handleNodeHealth).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/paths", s.handleNodePaths).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/analytics", s.handleNodeAnalytics).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/battery", s.handleNodeBattery).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/clock-skew", s.handleFleetClockSkew).Methods("GET")
|
||||
r.HandleFunc("/api/nodes/{pubkey}/clock-skew", s.handleNodeClockSkew).Methods("GET")
|
||||
r.HandleFunc("/api/observers/clock-skew", s.handleObserverClockSkew).Methods("GET")
|
||||
@@ -155,6 +163,7 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/nodes", s.handleNodes).Methods("GET")
|
||||
|
||||
// Analytics endpoints
|
||||
r.HandleFunc("/api/analytics/roles", s.handleAnalyticsRoles).Methods("GET")
|
||||
r.HandleFunc("/api/analytics/rf", s.handleAnalyticsRF).Methods("GET")
|
||||
r.HandleFunc("/api/analytics/topology", s.handleAnalyticsTopology).Methods("GET")
|
||||
r.HandleFunc("/api/analytics/channels", s.handleAnalyticsChannels).Methods("GET")
|
||||
@@ -1091,15 +1100,38 @@ func (s *Server) handleNodes(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
if s.store != nil {
|
||||
hashInfo := s.store.GetNodeHashSizeInfo()
|
||||
mbCap := s.store.GetMultiByteCapMap()
|
||||
relayWindow := s.cfg.GetHealthThresholds().RelayActiveHours
|
||||
for _, node := range nodes {
|
||||
if pk, ok := node["public_key"].(string); ok {
|
||||
EnrichNodeWithHashSize(node, hashInfo[pk])
|
||||
EnrichNodeWithMultiByte(node, mbCap[pk])
|
||||
if role, _ := node["role"].(string); role == "repeater" || role == "room" {
|
||||
info := s.store.GetRepeaterRelayInfo(pk, relayWindow)
|
||||
if info.LastRelayed != "" {
|
||||
node["last_relayed"] = info.LastRelayed
|
||||
}
|
||||
node["relay_active"] = info.RelayActive
|
||||
node["relay_count_1h"] = info.RelayCount1h
|
||||
node["relay_count_24h"] = info.RelayCount24h
|
||||
node["usefulness_score"] = s.store.GetRepeaterUsefulnessScore(pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.cfg.GeoFilter != nil {
|
||||
filtered := nodes[:0]
|
||||
for _, node := range nodes {
|
||||
// Foreign-flagged nodes (#730) are kept even when their GPS lies
|
||||
// outside the geofilter polygon — that's the whole point of the
|
||||
// flag: operators need to SEE bridged/leaked nodes, not have them
|
||||
// filtered away. The ingestor sets foreign_advert=1 when its
|
||||
// configured geo_filter rejected the advert; the server must
|
||||
// surface those.
|
||||
if isForeign, _ := node["foreign"].(bool); isForeign {
|
||||
filtered = append(filtered, node)
|
||||
continue
|
||||
}
|
||||
if NodePassesGeoFilter(node["lat"], node["lon"], s.cfg.GeoFilter) {
|
||||
filtered = append(filtered, node)
|
||||
}
@@ -1152,21 +1184,61 @@ func (s *Server) handleNodeDetail(w http.ResponseWriter, r *http.Request) {
return
}
node, err := s.db.GetNodeByPubkey(pubkey)
-if err != nil || node == nil {
+if err != nil {
writeError(w, 500, err.Error())
return
}
// Issue #772: short-URL fallback. If exact pubkey lookup misses and the
// path looks like a hex prefix (>=8 chars, <64), try prefix resolution.
if node == nil && len(pubkey) >= 8 && len(pubkey) < 64 {
resolved, ambiguous, perr := s.db.GetNodeByPrefix(pubkey)
if perr != nil {
writeError(w, 500, perr.Error())
return
}
if ambiguous {
writeError(w, http.StatusConflict, "Ambiguous prefix: multiple nodes match. Use a longer prefix.")
return
}
if resolved != nil {
if pk, _ := resolved["public_key"].(string); pk != "" && s.cfg.IsBlacklisted(pk) {
writeError(w, 404, "Not found")
return
}
node = resolved
}
}
if node == nil {
writeError(w, 404, "Not found")
return
}
// From here on use the canonical pubkey for downstream lookups.
if pk, _ := node["public_key"].(string); pk != "" {
pubkey = pk
}

if s.store != nil {
hashInfo := s.store.GetNodeHashSizeInfo()
EnrichNodeWithHashSize(node, hashInfo[pubkey])
mbCap := s.store.GetMultiByteCapMap()
EnrichNodeWithMultiByte(node, mbCap[pubkey])
if role, _ := node["role"].(string); role == "repeater" || role == "room" {
ht := s.cfg.GetHealthThresholds()
info := s.store.GetRepeaterRelayInfo(pubkey, ht.RelayActiveHours)
if info.LastRelayed != "" {
node["last_relayed"] = info.LastRelayed
}
node["relay_active"] = info.RelayActive
node["relay_window_hours"] = info.WindowHours
node["relay_count_1h"] = info.RelayCount1h
node["relay_count_24h"] = info.RelayCount24h
node["usefulness_score"] = s.store.GetRepeaterUsefulnessScore(pubkey)
}
}

name := ""
if n, ok := node["name"]; ok && n != nil {
name = fmt.Sprintf("%v", n)
}
-recentAdverts, _ := s.db.GetRecentTransmissionsForNode(pubkey, name, 20)
// #1143: GetRecentTransmissionsForNode no longer accepts a name fallback;
// attribution is strict exact-match on the indexed from_pubkey column.
+recentAdverts, _ := s.db.GetRecentTransmissionsForNode(pubkey, 20)

writeJSON(w, NodeDetailResponse{
Node: node,
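
[Editor's note] GetNodeByPrefix itself is not shown in this diff; a plausible sketch under stated assumptions (the nodes table and public_key column appear in the test SQL later in this change, the method name and receiver are illustrative):

// getNodeByPrefixSketch resolves a hex prefix to a single node. It returns
// (nil, true, nil) when two or more pubkeys share the prefix so the handler
// can answer 409, and (nil, false, nil) when nothing matches.
func (db *DB) getNodeByPrefixSketch(prefix string) (map[string]interface{}, bool, error) {
	rows, err := db.conn.Query(
		`SELECT public_key FROM nodes WHERE public_key LIKE ? || '%' LIMIT 2`, prefix)
	if err != nil {
		return nil, false, err
	}
	defer rows.Close()
	var keys []string
	for rows.Next() {
		var pk string
		if err := rows.Scan(&pk); err != nil {
			return nil, false, err
		}
		keys = append(keys, pk)
	}
	if len(keys) == 0 {
		return nil, false, nil // no match: caller falls through to 404
	}
	if len(keys) > 1 {
		return nil, true, nil // ambiguous: caller answers 409
	}
	node, err := db.GetNodeByPubkey(keys[0])
	return node, false, err
}
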
@@ -1518,8 +1590,9 @@ func (s *Server) handleFleetClockSkew(w http.ResponseWriter, r *http.Request) {

func (s *Server) handleAnalyticsRF(w http.ResponseWriter, r *http.Request) {
region := r.URL.Query().Get("region")
window := ParseTimeWindow(r)
if s.store != nil {
-writeJSON(w, s.store.GetAnalyticsRF(region))
+writeJSON(w, s.store.GetAnalyticsRFWithWindow(region, window))
return
}
writeJSON(w, RFAnalyticsResponse{
@@ -1538,8 +1611,9 @@ func (s *Server) handleAnalyticsRF(w http.ResponseWriter, r *http.Request) {

func (s *Server) handleAnalyticsTopology(w http.ResponseWriter, r *http.Request) {
region := r.URL.Query().Get("region")
window := ParseTimeWindow(r)
if s.store != nil {
-data := s.store.GetAnalyticsTopology(region)
+data := s.store.GetAnalyticsTopologyWithWindow(region, window)
if s.cfg != nil && len(s.cfg.NodeBlacklist) > 0 {
data = s.filterBlacklistedFromTopology(data)
}
@@ -1561,7 +1635,8 @@ func (s *Server) handleAnalyticsTopology(w http.ResponseWriter, r *http.Request)
func (s *Server) handleAnalyticsChannels(w http.ResponseWriter, r *http.Request) {
if s.store != nil {
region := r.URL.Query().Get("region")
-writeJSON(w, s.store.GetAnalyticsChannels(region))
window := ParseTimeWindow(r)
+writeJSON(w, s.store.GetAnalyticsChannelsWithWindow(region, window))
return
}
channels, _ := s.db.GetChannels()
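
[Editor's note] The window parameters compose with the existing region query parameter; a hypothetical client-side call, assuming the standard net/http, io and log imports (host, port and region value are placeholders):

// fetchRF24h demonstrates the #842 query parameters from the client side:
// a relative 24h window scoped to one region. An absolute range would use
// ?from=<RFC3339>&to=<RFC3339> instead.
func fetchRF24h() ([]byte, error) {
	resp, err := http.Get("http://localhost:8080/api/analytics/rf?region=JKG&window=24h")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
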
@@ -1978,6 +2053,7 @@ func (s *Server) handleObservers(w http.ResponseWriter, r *http.Request) {
ClientVersion: o.ClientVersion, Radio: o.Radio,
BatteryMv: o.BatteryMv, UptimeSecs: o.UptimeSecs,
NoiseFloor: o.NoiseFloor,
LastPacketAt: o.LastPacketAt,
PacketsLastHour: plh,
Lat: lat, Lon: lon, NodeRole: nodeRole,
})
@@ -2019,6 +2095,7 @@ func (s *Server) handleObserverDetail(w http.ResponseWriter, r *http.Request) {
ClientVersion: obs.ClientVersion, Radio: obs.Radio,
BatteryMv: obs.BatteryMv, UptimeSecs: obs.UptimeSecs,
NoiseFloor: obs.NoiseFloor,
LastPacketAt: obs.LastPacketAt,
PacketsLastHour: plh,
})
}

@@ -0,0 +1,59 @@
package main

import (
"database/sql"
"fmt"
"sync"
)

// rwCache holds a process-wide cached RW connection per database path.
// Instead of opening and closing a new RW connection on every call to openRW,
// we cache a single *sql.DB (which internally manages one connection due to
// SetMaxOpenConns(1)). This eliminates repeated open/close overhead for
// vacuum, prune, persist operations that run frequently (#921).
var rwCache = struct {
mu sync.Mutex
conns map[string]*sql.DB
}{conns: make(map[string]*sql.DB)}

// cachedRW returns a cached read-write connection for the given dbPath.
// The connection is created on first call and reused thereafter.
// Callers MUST NOT call Close() on the returned *sql.DB.
func cachedRW(dbPath string) (*sql.DB, error) {
rwCache.mu.Lock()
defer rwCache.mu.Unlock()

if db, ok := rwCache.conns[dbPath]; ok {
return db, nil
}

dsn := fmt.Sprintf("file:%s?_journal_mode=WAL", dbPath)
db, err := sql.Open("sqlite", dsn)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(1)
if _, err := db.Exec("PRAGMA busy_timeout = 5000"); err != nil {
db.Close()
return nil, fmt.Errorf("set busy_timeout: %w", err)
}
rwCache.conns[dbPath] = db
return db, nil
}

// closeRWCache closes all cached RW connections (for tests/shutdown).
func closeRWCache() {
rwCache.mu.Lock()
defer rwCache.mu.Unlock()
for k, db := range rwCache.conns {
db.Close()
delete(rwCache.conns, k)
}
}

// rwCacheLen returns the number of cached connections (for testing).
func rwCacheLen() int {
rwCache.mu.Lock()
defer rwCache.mu.Unlock()
return len(rwCache.conns)
}
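
[Editor's note] Callers interact with the cache through cachedRW alone; a short usage sketch (the SQL and table name are illustrative, borrowed from the test schema used later in this change):

// pruneOldRows shows the intended calling pattern: fetch the shared
// handle and never Close() it; closeRWCache owns the lifecycle.
func pruneOldRows(dbPath string) error {
	db, err := cachedRW(dbPath)
	if err != nil {
		return err
	}
	_, err = db.Exec(`DELETE FROM transmissions WHERE first_seen < datetime('now', '-30 days')`)
	return err
}
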
@@ -0,0 +1,55 @@
package main

import (
"os"
"path/filepath"
"testing"
)

func TestCachedRW_ReturnsSameHandle(t *testing.T) {
dir := t.TempDir()
dbPath := filepath.Join(dir, "test.db")

// Create the DB file
f, _ := os.Create(dbPath)
f.Close()

defer closeRWCache()

db1, err := cachedRW(dbPath)
if err != nil {
t.Fatalf("first cachedRW: %v", err)
}
db2, err := cachedRW(dbPath)
if err != nil {
t.Fatalf("second cachedRW: %v", err)
}
if db1 != db2 {
t.Fatalf("cachedRW returned different handles: %p vs %p", db1, db2)
}
}

func TestCachedRW_100Calls_SingleConnection(t *testing.T) {
dir := t.TempDir()
dbPath := filepath.Join(dir, "test.db")
f, _ := os.Create(dbPath)
f.Close()

defer closeRWCache()

var first interface{}
for i := 0; i < 100; i++ {
db, err := cachedRW(dbPath)
if err != nil {
t.Fatalf("call %d: %v", i, err)
}
if i == 0 {
first = db
} else if db != first {
t.Fatalf("call %d returned different handle", i)
}
}
if rwCacheLen() != 1 {
t.Fatalf("expected 1 cached connection, got %d", rwCacheLen())
}
}
@@ -0,0 +1,109 @@
package main

import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
)

// Issue #772 — shortened URL for easier sending over the mesh.
//
// Public keys are 64 hex chars. Operators want to share node URLs over a
// mesh radio link where every byte counts. We allow truncating the pubkey
// in the URL down to a minimum 8-hex-char prefix; the server resolves the
// prefix back to the full pubkey when (and only when) it is unambiguous.

func TestResolveNodePrefix_Unique(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
seedTestData(t, db)

// "aabbccdd" uniquely identifies the seeded TestRepeater (pubkey aabbccdd11223344).
node, ambiguous, err := db.GetNodeByPrefix("aabbccdd")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if ambiguous {
t.Fatalf("expected unambiguous match, got ambiguous=true")
}
if node == nil {
t.Fatalf("expected node, got nil")
}
if got, _ := node["public_key"].(string); got != "aabbccdd11223344" {
t.Errorf("expected public_key aabbccdd11223344, got %q", got)
}
}

func TestResolveNodePrefix_Ambiguous(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
seedTestData(t, db)

// Insert a second node sharing the 8-char prefix "aabbccdd".
if _, err := db.conn.Exec(`INSERT INTO nodes (public_key, name, role, advert_count)
VALUES ('aabbccdd99887766', 'OtherNode', 'companion', 1)`); err != nil {
t.Fatal(err)
}

node, ambiguous, err := db.GetNodeByPrefix("aabbccdd")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if !ambiguous {
t.Fatalf("expected ambiguous=true for shared prefix, got false (node=%v)", node)
}
if node != nil {
t.Errorf("expected nil node when ambiguous, got %v", node["public_key"])
}
}

func TestResolveNodePrefix_TooShort(t *testing.T) {
db := setupTestDB(t)
defer db.Close()
seedTestData(t, db)

// <8 hex chars must NOT resolve, even if it would be unique.
node, _, err := db.GetNodeByPrefix("aabbccd")
if err == nil && node != nil {
t.Errorf("expected nil/error for 7-char prefix, got node %v", node["public_key"])
}
}

// Route-level: GET /api/nodes/<8-char-prefix> resolves to the full node.
func TestNodeDetailRoute_PrefixResolves(t *testing.T) {
_, router := setupTestServer(t)

req := httptest.NewRequest("GET", "/api/nodes/aabbccdd", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)

if w.Code != http.StatusOK {
t.Fatalf("expected 200 for unique 8-char prefix, got %d body=%s", w.Code, w.Body.String())
}
var body NodeDetailResponse
if err := json.Unmarshal(w.Body.Bytes(), &body); err != nil {
t.Fatalf("unmarshal: %v", err)
}
pk, _ := body.Node["public_key"].(string)
if pk != "aabbccdd11223344" {
t.Errorf("expected resolved pubkey aabbccdd11223344, got %q", pk)
}
}

// Route-level: GET /api/nodes/<ambiguous-prefix> returns 409 with a hint.
func TestNodeDetailRoute_PrefixAmbiguous(t *testing.T) {
srv, router := setupTestServer(t)
if _, err := srv.db.conn.Exec(`INSERT INTO nodes (public_key, name, role, advert_count)
VALUES ('aabbccdd99887766', 'OtherNode', 'companion', 1)`); err != nil {
t.Fatal(err)
}

req := httptest.NewRequest("GET", "/api/nodes/aabbccdd", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)

if w.Code != http.StatusConflict {
t.Fatalf("expected 409 for ambiguous prefix, got %d body=%s", w.Code, w.Body.String())
}
}
@@ -1,6 +1,7 @@
package main

import (
+"crypto/sha256"
"database/sql"
"encoding/json"
"fmt"
@@ -188,6 +189,10 @@ type PacketStore struct {
hashSizeInfoCache map[string]*hashSizeNodeInfo
hashSizeInfoAt time.Time

+// Cached multi-byte capability map (pubkey → entry), recomputed every 15s.
+multiByteCapCache map[string]*MultiByteCapEntry
+multiByteCapAt time.Time

// Precomputed distinct advert pubkey count (refcounted for eviction correctness).
// Updated incrementally during Load/Ingest/Evict — avoids JSON parsing in GetPerfStoreStats.
advertPubkeys map[string]int // pubkey → number of advert packets referencing it
@@ -2271,6 +2276,10 @@ func (s *PacketStore) filterPackets(q PacketQuery) []*StoreTx {
}
// Single-pass filter: apply all predicates in one scan.
results := filterTxSlice(source, func(tx *StoreTx) bool {
+// Data integrity: exclude legacy rows missing hash or timestamp (#871)
+if tx.Hash == "" || tx.FirstSeen == "" {
+return false
+}
if hasType && (tx.PayloadType == nil || *tx.PayloadType != filterType) {
return false
}
@@ -2432,6 +2441,145 @@ func (s *PacketStore) fetchAndCacheRegionObs(region string) map[string]bool {
return m
}

// iataMatchesRegion returns true if iata matches any of the comma-separated
// region codes in regionParam. Comparison is case-insensitive and trim-tolerant.
// Empty iata never matches; empty regionParam never matches.
//
// #804: shared helper used by analytics to attribute transmissions to a node's
// HOME region (derived from observers that hear its zero-hop direct adverts)
// rather than to the observer that happened to relay a packet.
func iataMatchesRegion(iata, regionParam string) bool {
if iata == "" || regionParam == "" {
return false
}
codes := normalizeRegionCodes(regionParam)
if len(codes) == 0 {
return false
}
got := strings.TrimSpace(strings.ToUpper(iata))
if got == "" {
return false
}
for _, c := range codes {
if c == got {
return true
}
}
return false
}
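
[Editor's note] A few concrete cases of the documented behavior, as a sketch; it assumes normalizeRegionCodes splits the comma list and upper-cases each code, which matches how the helper is used above:

func TestIataMatchesRegion_Examples(t *testing.T) {
	if !iataMatchesRegion("jkg", "JKG,GOT") {
		t.Error("matching is case-insensitive")
	}
	if !iataMatchesRegion(" GOT ", "jkg,got") {
		t.Error("matching is trim-tolerant")
	}
	if iataMatchesRegion("ARN", "JKG,GOT") {
		t.Error("non-member IATA must not match")
	}
	if iataMatchesRegion("", "JKG") || iataMatchesRegion("JKG", "") {
		t.Error("empty iata or empty regionParam never matches")
	}
}
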

// computeNodeHomeRegions returns a pubkey → IATA map deriving each node's
// HOME region from zero-hop DIRECT adverts. A zero-hop direct advert is the
// most authoritative location signal because the path byte is set locally on
// the originating radio and the packet has not been relayed: the observer
// that hears it is necessarily within direct RF range of the originator.
//
// When a node has zero-hop direct adverts heard by observers from multiple
// regions, the most-frequently-observed region wins (geographic plurality).
//
// Caller must hold s.mu (read or write). Returns empty map (not nil) if no
// observers are loaded or no zero-hop direct adverts have been seen.
//
// #804: feeds analytics region-attribution so a multi-byte repeater whose
// flood adverts get relayed across regions is still attributed to its home.
func (s *PacketStore) computeNodeHomeRegions() map[string]string {
// Build observer → IATA map. observers table is small (≪ packets), so a
// single DB read here is acceptable; resolveRegionObservers does similar.
obsIATA := make(map[string]string, 64)
if s.db != nil {
if observers, err := s.db.GetObservers(); err == nil {
for _, o := range observers {
if o.IATA != nil && *o.IATA != "" {
obsIATA[o.ID] = strings.TrimSpace(strings.ToUpper(*o.IATA))
}
}
}
}
if len(obsIATA) == 0 {
return map[string]string{}
}

// Tally zero-hop direct ADVERT region observations per pubkey.
type tally struct {
counts map[string]int
}
per := make(map[string]*tally, 256)

for _, tx := range s.packets {
if tx.RawHex == "" || len(tx.RawHex) < 4 {
continue
}
if tx.PayloadType == nil || *tx.PayloadType != PayloadADVERT {
continue
}
if tx.DecodedJSON == "" {
continue
}
header, err := strconv.ParseUint(tx.RawHex[:2], 16, 8)
if err != nil {
continue
}
routeType := header & 0x03
if routeType != uint64(RouteDirect) && routeType != uint64(RouteTransportDirect) {
continue
}
// Path byte index — for direct/transport-direct it's at offset 1
// (matches the analytics decoder's pathByteIdx logic).
if len(tx.RawHex) < 4 {
continue
}
pathByte, err := strconv.ParseUint(tx.RawHex[2:4], 16, 8)
if err != nil {
continue
}
hopCount := pathByte & 0x3F
if hopCount != 0 {
continue
}

var d map[string]interface{}
if json.Unmarshal([]byte(tx.DecodedJSON), &d) != nil {
continue
}
pk, _ := d["pubKey"].(string)
if pk == "" {
pk, _ = d["public_key"].(string)
}
if pk == "" {
continue
}

for _, obs := range tx.Observations {
iata := obsIATA[obs.ObserverID]
if iata == "" {
continue
}
t := per[pk]
if t == nil {
t = &tally{counts: map[string]int{}}
per[pk] = t
}
t.counts[iata]++
}
}

out := make(map[string]string, len(per))
for pk, t := range per {
var bestIATA string
bestCount := 0
for iata, n := range t.counts {
if n > bestCount || (n == bestCount && iata < bestIATA) {
bestCount = n
bestIATA = iata
}
}
if bestIATA != "" {
out[pk] = bestIATA
}
}
return out
}
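
[Editor's note] The packet-header arithmetic above is compact; here is the same zero-hop direct-advert test as a standalone sketch, using the RouteDirect/RouteTransportDirect constants exactly as the function above does:

// isZeroHopDirect reports whether a raw packet (hex-encoded) is a direct
// or transport-direct packet whose path byte records zero hops. It mirrors
// the checks inside computeNodeHomeRegions.
func isZeroHopDirect(rawHex string) bool {
	if len(rawHex) < 4 {
		return false
	}
	header, err := strconv.ParseUint(rawHex[:2], 16, 8)
	if err != nil {
		return false
	}
	routeType := header & 0x03 // low two bits carry the route type
	if routeType != uint64(RouteDirect) && routeType != uint64(RouteTransportDirect) {
		return false
	}
	pathByte, err := strconv.ParseUint(rawHex[2:4], 16, 8)
	if err != nil {
		return false
	}
	return pathByte&0x3F == 0 // low six bits are the hop count
}
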

// enrichObs returns a map with observation fields + transmission fields.
func (s *PacketStore) enrichObs(obs *StoreObs) map[string]interface{} {
tx := s.byTxID[obs.TransmissionID]
@@ -3524,6 +3672,51 @@ func (s *PacketStore) GetChannels(region string) []map[string]interface{} {
})
}

+// #688: scan decoded message text for #hashtag mentions and surface any
+// previously-unseen channel names as discovered channels. We dedup against
+// channelMap (matched by name) so a channel that already has traffic does
+// NOT also appear as discovered.
+discovered := map[string]string{} // name -> lastActivity
+for _, snap := range snapshots {
+if !snap.hasRegion {
+continue
+}
+var decoded decodedGrp
+if json.Unmarshal([]byte(snap.decodedJSON), &decoded) != nil {
+continue
+}
+if decoded.Type != "CHAN" || decoded.Text == "" {
+continue
+}
+if hasGarbageChars(decoded.Text) {
+continue
+}
+for _, tag := range extractHashtagsFromText(decoded.Text) {
+// Skip if already a known/decoded channel (by name with or without '#').
+bare := tag[1:]
+if _, ok := channelMap[tag]; ok {
+continue
+}
+if _, ok := channelMap[bare]; ok {
+continue
+}
+if existing, ok := discovered[tag]; !ok || snap.firstSeen > existing {
+discovered[tag] = snap.firstSeen
+}
+}
+}
+for name, lastActivity := range discovered {
+channels = append(channels, map[string]interface{}{
+"hash": name,
+"name": name,
+"lastMessage": nil,
+"lastSender": nil,
+"messageCount": 0,
+"lastActivity": lastActivity,
+"discovered": true,
+})
+}

s.channelsCacheMu.Lock()
s.channelsCacheRes = channels
s.channelsCacheKey = cacheKey
@@ -3782,8 +3975,18 @@ func (s *PacketStore) GetChannelMessages(channelHash string, limit, offset int,

// GetAnalyticsChannels returns full channel analytics computed from in-memory packets.
func (s *PacketStore) GetAnalyticsChannels(region string) map[string]interface{} {
return s.GetAnalyticsChannelsWithWindow(region, TimeWindow{})
}

// GetAnalyticsChannelsWithWindow returns channel analytics for the given region,
// optionally bounded to a time window (issue #842). Zero TimeWindow = all data.
func (s *PacketStore) GetAnalyticsChannelsWithWindow(region string, window TimeWindow) map[string]interface{} {
cacheKey := region
if !window.IsZero() {
cacheKey = region + "|" + window.CacheKey()
}
s.cacheMu.Lock()
-if cached, ok := s.chanCache[region]; ok && time.Now().Before(cached.expiresAt) {
+if cached, ok := s.chanCache[cacheKey]; ok && time.Now().Before(cached.expiresAt) {
s.cacheHits++
s.cacheMu.Unlock()
return cached.data
@@ -3791,16 +3994,43 @@ func (s *PacketStore) GetAnalyticsChannels(region string) map[string]interface{}
s.cacheMisses++
s.cacheMu.Unlock()

-result := s.computeAnalyticsChannels(region)
+result := s.computeAnalyticsChannels(region, window)

s.cacheMu.Lock()
-s.chanCache[region] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
+s.chanCache[cacheKey] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
s.cacheMu.Unlock()

return result
}

-func (s *PacketStore) computeAnalyticsChannels(region string) map[string]interface{} {
// channelNameMatchesHash validates that a decrypted channel name hashes to the
// observed single-byte channel hash. This rejects rainbow-table mismatches where
// an observer's lookup table incorrectly maps a hash byte to the wrong name.
// Firmware invariant: channelHash = SHA256(SHA256("#name")[:16])[0]
func channelNameMatchesHash(name string, hashStr string) bool {
expected, err := strconv.Atoi(hashStr)
if err != nil {
return false
}
chanName := name
if !strings.HasPrefix(chanName, "#") {
chanName = "#" + chanName
}
h1 := sha256.Sum256([]byte(chanName))
h2 := sha256.Sum256(h1[:16])
return int(h2[0]) == expected
}
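
[Editor's note] A tiny standalone program working the firmware invariant forward (the channel name is illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	name := "#test" // illustrative channel name
	h1 := sha256.Sum256([]byte(name))
	h2 := sha256.Sum256(h1[:16])
	// h2[0] is the one-byte channel hash carried on the air; a decrypted
	// name is accepted only if it reproduces this byte, i.e.
	// channelNameMatchesHash("test", strconv.Itoa(int(h2[0]))) is true.
	fmt.Printf("channel %s -> hash byte %d\n", name, h2[0])
}
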

// isPlaceholderName returns true if the name is a "chN" placeholder (not a real decrypted name).
func isPlaceholderName(name string) bool {
if !strings.HasPrefix(name, "ch") {
return false
}
_, err := strconv.Atoi(name[2:])
return err == nil
}

+func (s *PacketStore) computeAnalyticsChannels(region string, window TimeWindow) map[string]interface{} {
s.mu.RLock()
defer s.mu.RUnlock()

@@ -3849,6 +4079,9 @@ func (s *PacketStore) computeAnalyticsChannels(region string) map[string]interfa

grpTxts := s.byPayloadType[5]
for _, tx := range grpTxts {
+if !window.Includes(tx.FirstSeen) {
+continue
+}
if regionObs != nil {
match := false
for _, obs := range tx.Observations {
@@ -3879,16 +4112,27 @@ func (s *PacketStore) computeAnalyticsChannels(region string) map[string]interfa
name = "ch" + hash
}
encrypted := decoded.Text == "" && decoded.Sender == ""
-// Use hash as key for grouping (matches Node.js String(hash))
-chKey := hash
-if decoded.Type == "CHAN" && decoded.Channel != "" {
-chKey = hash + "_" + decoded.Channel

+// Bug #978 fix: validate channel name against hash to reject rainbow-table mismatches.
+// If the claimed channel name doesn't hash to the observed channelHash byte, discard it.
+if name != "" && name != "ch"+hash && !channelNameMatchesHash(name, hash) {
+name = "ch" + hash
+encrypted = true
+}

+// Bug #978 fix: always group by hash byte alone — same physical channel,
+// regardless of which observer decrypted it.
+chKey := hash

ch := channelMap[chKey]
if ch == nil {
ch = &chanInfo{Hash: hash, Name: name, Senders: map[string]bool{}, LastActivity: tx.FirstSeen, Encrypted: encrypted}
channelMap[chKey] = ch
} else {
+// Upgrade bucket name: if current is placeholder and we have a validated decrypted name
+if isPlaceholderName(ch.Name) && !isPlaceholderName(name) {
+ch.Name = name
+}
}
ch.Messages++
ch.LastActivity = tx.FirstSeen
@@ -3978,8 +4222,18 @@ func (s *PacketStore) computeAnalyticsChannels(region string) map[string]interfa

// GetAnalyticsRF returns full RF analytics computed from in-memory observations.
func (s *PacketStore) GetAnalyticsRF(region string) map[string]interface{} {
return s.GetAnalyticsRFWithWindow(region, TimeWindow{})
}

// GetAnalyticsRFWithWindow returns RF analytics bounded by an optional
// time window (issue #842). Zero TimeWindow = all data (backwards compatible).
func (s *PacketStore) GetAnalyticsRFWithWindow(region string, window TimeWindow) map[string]interface{} {
cacheKey := region
if !window.IsZero() {
cacheKey = region + "|" + window.CacheKey()
}
s.cacheMu.Lock()
-if cached, ok := s.rfCache[region]; ok && time.Now().Before(cached.expiresAt) {
+if cached, ok := s.rfCache[cacheKey]; ok && time.Now().Before(cached.expiresAt) {
s.cacheHits++
s.cacheMu.Unlock()
return cached.data
@@ -3987,16 +4241,16 @@ func (s *PacketStore) GetAnalyticsRF(region string) map[string]interface{} {
s.cacheMisses++
s.cacheMu.Unlock()

-result := s.computeAnalyticsRF(region)
+result := s.computeAnalyticsRF(region, window)

s.cacheMu.Lock()
-s.rfCache[region] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
+s.rfCache[cacheKey] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
s.cacheMu.Unlock()

return result
}

-func (s *PacketStore) computeAnalyticsRF(region string) map[string]interface{} {
+func (s *PacketStore) computeAnalyticsRF(region string, window TimeWindow) map[string]interface{} {
s.mu.RLock()
defer s.mu.RUnlock()

@@ -4035,6 +4289,9 @@ func (s *PacketStore) computeAnalyticsRF(region string) map[string]interface{} {
for obsID := range regionObs {
obsList := s.byObserver[obsID]
for _, obs := range obsList {
+if !window.Includes(obs.Timestamp) {
+continue
+}
totalObs++
tx := s.byTxID[obs.TransmissionID]
hash := ""
@@ -4120,6 +4377,12 @@ func (s *PacketStore) computeAnalyticsRF(region string) map[string]interface{} {
} else {
// No region: iterate all transmissions and their observations
for _, tx := range s.packets {
+// Window filter: skip transmissions outside the requested window.
+// We use tx.FirstSeen as the bounding timestamp; per-obs window
+// filter below handles cases where individual obs timestamps differ.
+if !window.Includes(tx.FirstSeen) {
+continue
+}
hash := tx.Hash
if hash != "" {
regionalHashes[hash] = true
@@ -4814,8 +5077,17 @@ func parsePathJSON(pathJSON string) []string {
}

func (s *PacketStore) GetAnalyticsTopology(region string) map[string]interface{} {
return s.GetAnalyticsTopologyWithWindow(region, TimeWindow{})
}

// GetAnalyticsTopologyWithWindow — see issue #842.
func (s *PacketStore) GetAnalyticsTopologyWithWindow(region string, window TimeWindow) map[string]interface{} {
cacheKey := region
if !window.IsZero() {
cacheKey = region + "|" + window.CacheKey()
}
s.cacheMu.Lock()
-if cached, ok := s.topoCache[region]; ok && time.Now().Before(cached.expiresAt) {
+if cached, ok := s.topoCache[cacheKey]; ok && time.Now().Before(cached.expiresAt) {
s.cacheHits++
s.cacheMu.Unlock()
return cached.data
@@ -4823,16 +5095,16 @@ func (s *PacketStore) GetAnalyticsTopology(region string) map[string]interface{}
s.cacheMisses++
s.cacheMu.Unlock()

-result := s.computeAnalyticsTopology(region)
+result := s.computeAnalyticsTopology(region, window)

s.cacheMu.Lock()
-s.topoCache[region] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
+s.topoCache[cacheKey] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
s.cacheMu.Unlock()

return result
}

-func (s *PacketStore) computeAnalyticsTopology(region string) map[string]interface{} {
+func (s *PacketStore) computeAnalyticsTopology(region string, window TimeWindow) map[string]interface{} {
s.mu.RLock()
defer s.mu.RUnlock()

@@ -4863,6 +5135,9 @@ func (s *PacketStore) computeAnalyticsTopology(region string) map[string]interfa
perObserver := map[string]map[string]*struct{ minDist, maxDist, count int }{}

for _, tx := range s.packets {
+if !window.Includes(tx.FirstSeen) {
+continue
+}
hops := txGetParsedPath(tx)
if len(hops) == 0 {
continue
@@ -4954,6 +5229,103 @@ func (s *PacketStore) computeAnalyticsTopology(region string) map[string]interfa
}
}

// pmLookup resolves a hop hex string to its prefix-map candidates,
// applying the same truncation used during map construction.
pmLookup := func(hop string) []nodeInfo {
key := strings.ToLower(hop)
if len(key) > maxPrefixLen {
key = key[:maxPrefixLen]
}
return pm.m[key]
}

// --- Dedup pass: merge hop prefixes that resolve unambiguously to the same node ---
// Only merge when pm.m[hop] has exactly 1 candidate (unique_prefix).
// Ambiguous short prefixes (efiten's concern: 1-byte collisions) stay separate.
{
type dedupInfo struct {
totalCount int
longestHop string
}
byPubkey := map[string]*dedupInfo{} // pubkey → merged info
ambiguous := map[string]int{} // hop → count (kept as-is)
for h, c := range hopFreq {
candidates := pmLookup(h)
if len(candidates) == 1 {
pk := strings.ToLower(candidates[0].PublicKey)
if info, ok := byPubkey[pk]; ok {
info.totalCount += c
if len(h) > len(info.longestHop) {
info.longestHop = h
}
} else {
byPubkey[pk] = &dedupInfo{totalCount: c, longestHop: h}
}
} else {
ambiguous[h] = c
}
}
// Rebuild hopFreq
hopFreq = make(map[string]int, len(byPubkey)+len(ambiguous))
for _, info := range byPubkey {
hopFreq[info.longestHop] = info.totalCount
}
for h, c := range ambiguous {
hopFreq[h] = c
}
}

// --- Dedup pass for pairs: merge by resolved pubkey pair ---
{
type pairDedupInfo struct {
totalCount int
longestA string
longestB string
}
byPubkeyPair := map[string]*pairDedupInfo{} // "pkA|pkB" (sorted) → merged info
ambiguousPairs := map[string]int{}
for p, c := range pairFreq {
parts := strings.SplitN(p, "|", 2)
candA := pmLookup(parts[0])
candB := pmLookup(parts[1])
if len(candA) == 1 && len(candB) == 1 {
pkA := strings.ToLower(candA[0].PublicKey)
pkB := strings.ToLower(candB[0].PublicKey)
// Canonicalize by sorted pubkey
if pkA > pkB {
pkA, pkB = pkB, pkA
parts[0], parts[1] = parts[1], parts[0]
}
key := pkA + "|" + pkB
if info, ok := byPubkeyPair[key]; ok {
info.totalCount += c
if len(parts[0]) > len(info.longestA) {
info.longestA = parts[0]
}
if len(parts[1]) > len(info.longestB) {
info.longestB = parts[1]
}
} else {
byPubkeyPair[key] = &pairDedupInfo{totalCount: c, longestA: parts[0], longestB: parts[1]}
}
} else {
ambiguousPairs[p] = c
}
}
// Rebuild pairFreq
pairFreq = make(map[string]int, len(byPubkeyPair)+len(ambiguousPairs))
for _, info := range byPubkeyPair {
a, b := info.longestA, info.longestB
if a > b {
a, b = b, a
}
pairFreq[a+"|"+b] = info.totalCount
}
for p, c := range ambiguousPairs {
pairFreq[p] = c
}
}
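
[Editor's note] The merge rule above generalizes; a self-contained sketch of the single-hop variant, with the resolve callback standing in for pmLookup and nodeInfo reduced to a plain pubkey string:

// mergeHopCounts pools counts whose hop prefix resolves to exactly one
// known node under that node's longest observed prefix; ambiguous
// prefixes are kept untouched, mirroring the dedup pass above.
func mergeHopCounts(hopFreq map[string]int, resolve func(string) []string) map[string]int {
	type agg struct {
		total      int
		longestHop string
	}
	byPubkey := map[string]*agg{}
	out := map[string]int{}
	for h, c := range hopFreq {
		cands := resolve(h)
		if len(cands) != 1 {
			out[h] = c // ambiguous: keep as-is
			continue
		}
		pk := strings.ToLower(cands[0])
		a := byPubkey[pk]
		if a == nil {
			a = &agg{}
			byPubkey[pk] = a
		}
		a.total += c
		if len(h) > len(a.longestHop) {
			a.longestHop = h
		}
	}
	for _, a := range byPubkey {
		out[a.longestHop] = a.total
	}
	return out
}
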

// Top repeaters
type freqEntry struct {
hop string
@@ -5446,21 +5818,41 @@ func (s *PacketStore) GetAnalyticsHashSizes(region string) map[string]interface{

result := s.computeAnalyticsHashSizes(region)

-// Add multi-byte capability data (only for unfiltered/global view)
+// Multi-byte capability is a NODE property (derived from each node's own
+// adverts), not a function of the observing region. The region filter
+// should only control which nodes appear in the analytics list, not the
+// evidence used to classify their capability. Always compute capability
+// against the GLOBAL advert dataset so a region-filtered view doesn't
+// downgrade every adopter to "unknown" just because the confirming
+// advert was heard by an out-of-region observer (#bug: meshat.se/JKG
+// showed 14 unknown vs 0 unknown unfiltered).
+globalAdopterHS := make(map[string]int)
if region == "" {
-// Pass adopter hash sizes so capability can cross-reference
-adopterHS := make(map[string]int)
if mbNodes, ok := result["multiByteNodes"].([]map[string]interface{}); ok {
for _, n := range mbNodes {
pk, _ := n["pubkey"].(string)
hs, _ := n["hashSize"].(int)
if pk != "" && hs >= 2 {
-adopterHS[pk] = hs
+globalAdopterHS[pk] = hs
}
}
}
+} else {
+// Pull the global multiByteNodes set without the region filter.
+// Use a separate compute call (not the cached path) to avoid
+// recursive locking on hashCache and to keep this side-effect free.
+globalRes := s.computeAnalyticsHashSizes("")
+if mbNodes, ok := globalRes["multiByteNodes"].([]map[string]interface{}); ok {
+for _, n := range mbNodes {
+pk, _ := n["pubkey"].(string)
+hs, _ := n["hashSize"].(int)
+if pk != "" && hs >= 2 {
+globalAdopterHS[pk] = hs
+}
+}
+}
-result["multiByteCapability"] = s.computeMultiByteCapability(adopterHS)
}
+result["multiByteCapability"] = s.computeMultiByteCapability(globalAdopterHS)

s.cacheMu.Lock()
s.hashCache[region] = &cachedResult{data: result, expiresAt: time.Now().Add(s.rfCacheTTL)}
@@ -5478,6 +5870,16 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
regionObs = s.resolveRegionObservers(region)
}

+// #804: derive each node's HOME region from zero-hop direct adverts (the
+// most authoritative location signal — those packets cannot have been
+// relayed). When non-empty, multi-byte node attribution prefers this
+// over observer-region. Falls back to observer-region when unknown.
+nodeHomeRegion := s.computeNodeHomeRegions()
+attributionMethod := "observer"
+if region != "" && len(nodeHomeRegion) > 0 {
+attributionMethod = "repeater"
+}

allNodes, pm := s.getCachedNodesAndPM()

// Build pubkey→role map for filtering by node type.
@@ -5496,18 +5898,6 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
if tx.RawHex == "" {
continue
}
-if regionObs != nil {
-match := false
-for _, obs := range tx.Observations {
-if regionObs[obs.ObserverID] {
-match = true
-break
-}
-}
-if !match {
-continue
-}
-}

// Parse header and path byte
if len(tx.RawHex) < 4 {
@@ -5537,52 +5927,84 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
continue
}

-// Track originator from advert packets (including zero-hop adverts,
-// keyed by pubKey so same-name nodes don't merge).
+// #804: pre-extract originator pubkey for ADVERT packets so we can
+// (a) relax observer-region filter when the originator's HOME region
+// matches the requested region (a flood relay heard outside the
+// home region must still attribute to the home), and
+// (b) reuse the parsed values below without re-parsing.
+var advertPK, advertName string
+var advertParsed bool
if tx.PayloadType != nil && *tx.PayloadType == PayloadADVERT && tx.DecodedJSON != "" {
var d map[string]interface{}
if json.Unmarshal([]byte(tx.DecodedJSON), &d) == nil {
-pk := ""
if v, ok := d["pubKey"].(string); ok {
-pk = v
+advertPK = v
} else if v, ok := d["public_key"].(string); ok {
-pk = v
+advertPK = v
}
-if pk != "" {
-name := ""
-if n, ok := d["name"].(string); ok {
-name = n
-}
-if name == "" {
-if len(pk) >= 8 {
-name = pk[:8]
-} else {
-name = pk
-}
-}
-// Skip zero-hop direct adverts for hash_size — the
-// path byte is locally generated and unreliable.
-// Still count the packet and update lastSeen.
-isZeroHop := (routeType == uint64(RouteDirect) || routeType == uint64(RouteTransportDirect)) && (actualPathByte&0x3F) == 0
-if byNode[pk] == nil {
-role := nodeRoleByPK[pk] // empty if unknown
-initHS := hashSize
-if isZeroHop {
-initHS = 0
-}
-byNode[pk] = map[string]interface{}{
-"hashSize": initHS, "packets": 0,
-"lastSeen": tx.FirstSeen, "name": name,
-"role": role,
-}
-}
-byNode[pk]["packets"] = byNode[pk]["packets"].(int) + 1
-if !isZeroHop {
-byNode[pk]["hashSize"] = hashSize
-}
-byNode[pk]["lastSeen"] = tx.FirstSeen
+if n, ok := d["name"].(string); ok {
+advertName = n
+}
+advertParsed = advertPK != ""
}
}

+if regionObs != nil {
+match := false
+for _, obs := range tx.Observations {
+if regionObs[obs.ObserverID] {
+match = true
+break
+}
+}
+// #804: allow ADVERTs from a node whose HOME region matches the
+// requested region even if no observer in that region heard this
+// particular packet (e.g. flood relay heard only by an out-of-
+// region observer). Conservative: only ADVERTs (the source is
+// known by pubkey) and only when home is established.
+if !match && advertParsed {
+if home, ok := nodeHomeRegion[advertPK]; ok && iataMatchesRegion(home, region) {
+match = true
+}
+}
+if !match {
+continue
+}
+}

+// Track originator from advert packets (including zero-hop adverts,
+// keyed by pubKey so same-name nodes don't merge).
+if advertParsed {
+pk := advertPK
+name := advertName
+if name == "" {
+if len(pk) >= 8 {
+name = pk[:8]
+} else {
+name = pk
+}
+}
+// Skip zero-hop direct adverts for hash_size — the
+// path byte is locally generated and unreliable.
+// Still count the packet and update lastSeen.
+isZeroHop := (routeType == uint64(RouteDirect) || routeType == uint64(RouteTransportDirect)) && (actualPathByte&0x3F) == 0
+if byNode[pk] == nil {
+role := nodeRoleByPK[pk] // empty if unknown
+initHS := hashSize
+if isZeroHop {
+initHS = 0
+}
+byNode[pk] = map[string]interface{}{
+"hashSize": initHS, "packets": 0,
+"lastSeen": tx.FirstSeen, "name": name,
+"role": role,
+}
+}
+byNode[pk]["packets"] = byNode[pk]["packets"].(int) + 1
+if !isZeroHop {
+byNode[pk]["hashSize"] = hashSize
+}
+byNode[pk]["lastSeen"] = tx.FirstSeen
+}

// Distribution/hourly/uniqueHops only for packets with relay hops
@@ -5663,6 +6085,15 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
// Multi-byte nodes
multiByteNodes := make([]map[string]interface{}, 0)
for pk, data := range byNode {
+// #804: when a region filter is active, prefer the repeater's HOME
+// region over the observer that happened to relay it. Falls back to
+// the (already-applied) observer-region filter when the node's home
+// region is unknown.
+if region != "" {
+if home, ok := nodeHomeRegion[pk]; ok && !iataMatchesRegion(home, region) {
+continue
+}
+}
if data["hashSize"].(int) > 1 {
multiByteNodes = append(multiByteNodes, map[string]interface{}{
"name": data["name"], "hashSize": data["hashSize"],
@@ -5677,11 +6108,17 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf

// Distribution by repeaters: count unique REPEATER nodes per hash size
distributionByRepeaters := map[string]int{"1": 0, "2": 0, "3": 0}
-for _, data := range byNode {
+for pk, data := range byNode {
role, _ := data["role"].(string)
if !strings.Contains(strings.ToLower(role), "repeater") {
continue
}
+// #804: same repeater-region preference as multiByteNodes.
+if region != "" {
+if home, ok := nodeHomeRegion[pk]; ok && !iataMatchesRegion(home, region) {
+continue
+}
+}
hs := data["hashSize"].(int)
key := strconv.Itoa(hs)
distributionByRepeaters[key]++
@@ -5694,6 +6131,7 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
"hourly": hourly,
"topHops": topHops,
"multiByteNodes": multiByteNodes,
+"attributionMethod": attributionMethod,
}
}

@@ -6170,6 +6608,51 @@ func EnrichNodeWithHashSize(node map[string]interface{}, info *hashSizeNodeInfo)
}
}

// EnrichNodeWithMultiByte adds multi-byte capability fields to a node map.
func EnrichNodeWithMultiByte(node map[string]interface{}, entry *MultiByteCapEntry) {
if entry == nil {
return
}
node["multi_byte_status"] = entry.Status
node["multi_byte_evidence"] = entry.Evidence
node["multi_byte_max_hash_size"] = entry.MaxHashSize
}

// GetMultiByteCapMap returns a cached pubkey → MultiByteCapEntry map.
// Reuses the same 15s TTL cache pattern as hash size info.
func (s *PacketStore) GetMultiByteCapMap() map[string]*MultiByteCapEntry {
s.hashSizeInfoMu.Lock()
if s.multiByteCapCache != nil && time.Since(s.multiByteCapAt) < 15*time.Second {
cached := s.multiByteCapCache
s.hashSizeInfoMu.Unlock()
return cached
}
s.hashSizeInfoMu.Unlock()

// Get adopter hash sizes from analytics for cross-referencing
analyticsData := s.GetAnalyticsHashSizes("")
adopterSizes := make(map[string]int)
if nodes, ok := analyticsData["nodes"].(map[string]map[string]interface{}); ok {
for pk, data := range nodes {
if hs, ok := data["hashSize"].(int); ok {
adopterSizes[pk] = hs
}
}
}

caps := s.computeMultiByteCapability(adopterSizes)
result := make(map[string]*MultiByteCapEntry, len(caps))
for i := range caps {
result[caps[i].PublicKey] = &caps[i]
}

s.hashSizeInfoMu.Lock()
s.multiByteCapCache = result
s.multiByteCapAt = time.Now()
s.hashSizeInfoMu.Unlock()
return result
}
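
[Editor's note] The lock/compute/store shape above recurs across this store's caches; a generic sketch of the idiom, with hypothetical type and field names:

// ttlCache sketches the 15s TTL idiom used by GetMultiByteCapMap and the
// hash-size info cache: check under lock, compute outside the lock, then
// store. Concurrent misses may compute twice; last write wins, which is
// acceptable for idempotent snapshots.
type ttlCache[T any] struct {
	mu  sync.Mutex
	val T
	at  time.Time
	ttl time.Duration
}

func (c *ttlCache[T]) get(compute func() T) T {
	c.mu.Lock()
	if !c.at.IsZero() && time.Since(c.at) < c.ttl {
		v := c.val
		c.mu.Unlock()
		return v
	}
	c.mu.Unlock()

	v := compute() // possibly expensive; done without holding the lock
	c.mu.Lock()
	c.val, c.at = v, time.Now()
	c.mu.Unlock()
	return v
}
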

// --- Multi-Byte Capability Inference ---

// MultiByteCapEntry represents a node's inferred multi-byte capability.

@@ -0,0 +1,133 @@
package main

import (
"net/http"
"time"
)

// TimeWindow is an inclusive time range used to bound analytics queries.
// Empty Since/Until means unbounded on that end (backwards compatible).
type TimeWindow struct {
Since string // RFC3339, empty = unbounded
Until string // RFC3339, empty = unbounded
// Label is a stable identifier for the user-requested window
// (e.g. "24h"). For relative windows it is the original alias; for
// absolute ranges it is empty (Since/Until are already stable).
// Used only for cache keying so that "?window=24h" produces a single
// cache entry instead of one per second.
Label string
}

// IsZero reports whether the window imposes no bounds at all.
func (w TimeWindow) IsZero() bool {
return w.Since == "" && w.Until == ""
}

// CacheKey returns a deterministic key suitable for analytics caches.
// For relative windows the key is the alias label so that the cache
// remains stable across the wall-clock advancing.
func (w TimeWindow) CacheKey() string {
if w.IsZero() {
return ""
}
if w.Label != "" {
return "rel:" + w.Label
}
return w.Since + "|" + w.Until
}

// Includes reports whether ts (an RFC3339-style string) falls within the
// window. Empty ts is treated as included (for callers that don't have a
// timestamp on every observation).
//
// Comparison is done by parsing both sides into time.Time. Lex compare is
// unsafe here because stored timestamps carry millisecond precision
// ("...HH:MM:SS.000Z") while bounds emitted by ParseTimeWindow do not
// ("...HH:MM:SSZ"), and '.' (0x2e) sorts before 'Z' (0x5a). If a timestamp
// fails to parse we fall back to lex compare to preserve old behavior.
func (w TimeWindow) Includes(ts string) bool {
if ts == "" {
return true
}
tt, terr := parseAnyRFC3339(ts)
if w.Since != "" {
if s, err := parseAnyRFC3339(w.Since); err == nil && terr == nil {
if tt.Before(s) {
return false
}
} else if ts < w.Since {
return false
}
}
if w.Until != "" {
if u, err := parseAnyRFC3339(w.Until); err == nil && terr == nil {
if tt.After(u) {
return false
}
} else if ts > w.Until {
return false
}
}
return true
}

// parseAnyRFC3339 accepts both fractional-second ("...000Z") and second-
// precision ("...Z") RFC3339 timestamps. time.RFC3339Nano handles both.
func parseAnyRFC3339(s string) (time.Time, error) {
return time.Parse(time.RFC3339Nano, s)
}

// ParseTimeWindow extracts a TimeWindow from query params.
//
// Supported parameters:
//
// ?window=1h | 24h | 7d | 30d — relative window ending "now"
// ?from=<RFC3339>&to=<RFC3339> — absolute custom range (either bound optional)
//
// When neither is set, returns the zero TimeWindow (unbounded; original behavior).
// Invalid values are silently ignored to preserve backwards compatibility.
func ParseTimeWindow(r *http.Request) TimeWindow {
q := r.URL.Query()

// Absolute range takes precedence if either bound is set.
from := q.Get("from")
to := q.Get("to")
if from != "" || to != "" {
w := TimeWindow{}
if from != "" {
if t, err := time.Parse(time.RFC3339, from); err == nil {
w.Since = t.UTC().Format(time.RFC3339)
}
}
if to != "" {
if t, err := time.Parse(time.RFC3339, to); err == nil {
w.Until = t.UTC().Format(time.RFC3339)
}
}
return w
}

// Relative window.
if win := q.Get("window"); win != "" {
var d time.Duration
switch win {
case "1h":
d = 1 * time.Hour
case "24h", "1d":
d = 24 * time.Hour
case "3d":
d = 3 * 24 * time.Hour
case "7d", "1w":
d = 7 * 24 * time.Hour
case "30d":
d = 30 * 24 * time.Hour
default:
// Unknown values are silently ignored — backwards compatible.
return TimeWindow{}
}
since := time.Now().UTC().Add(-d).Format(time.RFC3339)
return TimeWindow{Since: since, Label: win}
}

return TimeWindow{}
}
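
[Editor's note] Putting the pieces together, a hypothetical handler-side sketch; the string slice stands in for the store's real timestamped data:

// countInWindow shows how a handler threads a parsed window into a scan.
func countInWindow(r *http.Request, timestamps []string) int {
	w := ParseTimeWindow(r) // zero window when no params: everything matches
	n := 0
	for _, ts := range timestamps {
		if w.Includes(ts) {
			n++
		}
	}
	// w.CacheKey() would key a result cache: "" for unbounded,
	// "rel:24h" for ?window=24h, "since|until" for absolute ranges.
	return n
}
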
@@ -0,0 +1,144 @@
package main

import (
"net/http/httptest"
"strings"
"testing"
"time"
)

// Issue #842 — selectable analytics timeframes.
// Backend must accept ?window=1h|24h|7d|30d and ?from=/?to= and yield a
// TimeWindow that correctly bounds analytics queries.

func TestParseTimeWindow_Window24h(t *testing.T) {
r := httptest.NewRequest("GET", "/api/analytics/rf?window=24h", nil)
w := ParseTimeWindow(r)
if w.Since == "" {
t.Fatalf("window=24h: expected non-empty Since, got %q", w.Since)
}
since, err := time.Parse(time.RFC3339, w.Since)
if err != nil {
t.Fatalf("window=24h: Since %q is not RFC3339: %v", w.Since, err)
}
delta := time.Since(since)
if delta < 23*time.Hour || delta > 25*time.Hour {
t.Fatalf("window=24h: Since should be ~24h ago, got delta=%v", delta)
}
}

func TestParseTimeWindow_WindowAliases(t *testing.T) {
cases := map[string]time.Duration{
"1h": 1 * time.Hour,
"24h": 24 * time.Hour,
"7d": 7 * 24 * time.Hour,
"30d": 30 * 24 * time.Hour,
}
for q, want := range cases {
r := httptest.NewRequest("GET", "/api/analytics/rf?window="+q, nil)
got := ParseTimeWindow(r)
if got.Since == "" {
t.Errorf("window=%s: empty Since", q)
continue
}
since, err := time.Parse(time.RFC3339, got.Since)
if err != nil {
t.Errorf("window=%s: bad RFC3339 %q", q, got.Since)
continue
}
delta := time.Since(since)
// allow 5 minutes of slack
if delta < want-5*time.Minute || delta > want+5*time.Minute {
t.Errorf("window=%s: expected ~%v, got %v", q, want, delta)
}
}
}

func TestParseTimeWindow_FromTo(t *testing.T) {
from := "2026-04-01T00:00:00Z"
to := "2026-04-08T00:00:00Z"
r := httptest.NewRequest("GET", "/api/analytics/rf?from="+from+"&to="+to, nil)
w := ParseTimeWindow(r)
if w.Since != from {
t.Errorf("expected Since=%q, got %q", from, w.Since)
}
if w.Until != to {
t.Errorf("expected Until=%q, got %q", to, w.Until)
}
}

func TestParseTimeWindow_NoParams_BackwardsCompatible(t *testing.T) {
r := httptest.NewRequest("GET", "/api/analytics/rf", nil)
w := ParseTimeWindow(r)
if !w.IsZero() {
t.Errorf("no params should yield zero window, got %+v", w)
}
}

func TestTimeWindow_Includes(t *testing.T) {
w := TimeWindow{Since: "2026-04-01T00:00:00Z", Until: "2026-04-08T00:00:00Z"}
if !w.Includes("2026-04-05T12:00:00Z") {
t.Error("mid-range ts should be included")
}
if w.Includes("2026-03-31T23:59:59Z") {
t.Error("ts before Since should be excluded")
}
if w.Includes("2026-04-08T00:00:01Z") {
t.Error("ts after Until should be excluded")
}
// Empty ts always included (some observations lack timestamps)
if !w.Includes("") {
t.Error("empty ts should be included")
}
}

func TestTimeWindow_CacheKey_DistinctPerWindow(t *testing.T) {
a := TimeWindow{Since: "2026-04-01T00:00:00Z"}
b := TimeWindow{Since: "2026-04-02T00:00:00Z"}
z := TimeWindow{}
if a.CacheKey() == b.CacheKey() {
t.Error("different windows must produce different cache keys")
}
if z.CacheKey() != "" {
t.Errorf("zero window cache key must be empty, got %q", z.CacheKey())
}
if !strings.Contains(a.CacheKey(), "2026-04-01") {
t.Errorf("cache key should encode Since, got %q", a.CacheKey())
}
}

// Self-review fixes (#1018 polish).

// B1: a relative window must produce a STABLE cache key across calls,
// otherwise the analytics cache thrashes (one entry per second).
func TestTimeWindow_RelativeWindow_StableCacheKey(t *testing.T) {
r1 := httptest.NewRequest("GET", "/api/analytics/rf?window=24h", nil)
w1 := ParseTimeWindow(r1)
time.Sleep(1100 * time.Millisecond)
r2 := httptest.NewRequest("GET", "/api/analytics/rf?window=24h", nil)
w2 := ParseTimeWindow(r2)
if w1.CacheKey() != w2.CacheKey() {
t.Fatalf("relative window cache key must be stable across calls, got %q vs %q", w1.CacheKey(), w2.CacheKey())
}
}

// B2: stored timestamps use millisecond precision (".000Z") while RFC3339
// bounds have none. Includes() must use time-based compare, not lex compare,
// so tx past Until are correctly excluded regardless of fractional digits.
func TestTimeWindow_Includes_FractionalSecondsBoundary(t *testing.T) {
w := TimeWindow{Until: "2026-04-08T00:00:00Z"}
// A tx 1ms past Until should NOT be included.
if w.Includes("2026-04-08T00:00:00.001Z") {
t.Error("ts 1ms past Until must be excluded; lex compare against fractional ts is wrong")
}
// A tx well inside the window must be included.
if !w.Includes("2026-04-07T23:59:59.999Z") {
t.Error("ts just before Until must be included")
}

w2 := TimeWindow{Since: "2026-04-01T00:00:00Z"}
// A tx at exactly Since should be included.
if !w2.Includes("2026-04-01T00:00:00.000Z") {
t.Error("ts exactly at Since must be included; lex compare excludes it because '.' < 'Z'")
}
}
@@ -0,0 +1,338 @@
package main

import (
"database/sql"
"fmt"
"path/filepath"
"testing"
"time"

_ "modernc.org/sqlite"
)

// TestTopologyDedup_RepeatersMergeByPubkey verifies that topRepeaters
// merges entries whose hop prefixes resolve unambiguously to the same node.
func TestTopologyDedup_RepeatersMergeByPubkey(t *testing.T) {
dir := t.TempDir()
dbPath := filepath.Join(dir, "test.db")
conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
if err != nil {
t.Fatal(err)
}
defer conn.Close()

exec := func(s string) {
if _, err := conn.Exec(s); err != nil {
t.Fatalf("SQL exec failed: %v\nSQL: %s", err, s)
}
}
exec(`CREATE TABLE transmissions (
id INTEGER PRIMARY KEY, raw_hex TEXT, hash TEXT, first_seen TEXT,
route_type INTEGER, payload_type INTEGER, payload_version INTEGER, decoded_json TEXT
)`)
exec(`CREATE TABLE observations (
id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT
)`)
exec(`CREATE TABLE observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
exec(`CREATE TABLE nodes (
public_key TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL,
last_seen TEXT, frequency REAL
)`)
exec(`CREATE TABLE schema_version (version INTEGER)`)
exec(`INSERT INTO schema_version (version) VALUES (1)`)
exec(`CREATE INDEX idx_tx_first_seen ON transmissions(first_seen)`)

// Insert two repeater nodes with distinct pubkeys.
// AQUA: pubkey starts with 0735bc...
// BETA: pubkey starts with 99aabb...
exec(`INSERT INTO nodes (public_key, name, role) VALUES ('0735bc6dda4d1122aabbccdd', 'AQUA', 'Repeater')`)
exec(`INSERT INTO nodes (public_key, name, role) VALUES ('99aabb001122334455667788', 'BETA', 'Repeater')`)

base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)

// Create packets:
// - 10 packets with path ["07", "99aa"] (short prefix for AQUA, medium for BETA)
// - 5 packets with path ["0735bc", "99"] (medium prefix for AQUA, short for BETA)
// - 3 packets with path ["0735bc6d", "99aabb"] (long prefix for both)
txID := 1
obsID := 1
insertTx := func(path string, count int) {
for i := 0; i < count; i++ {
ts := base.Add(time.Duration(txID) * time.Minute).Format(time.RFC3339)
hash := fmt.Sprintf("h%04d", txID)
conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, 0, 4, 1, ?)",
txID, "aabb", hash, ts, fmt.Sprintf(`{"pubKey":"pk%04d"}`, txID))
conn.Exec("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
obsID, txID, "obs1", "Obs1", "RX", -10.0, -80.0, 5, path, ts)
txID++
obsID++
}
}

insertTx(`["07","99aa"]`, 10)
insertTx(`["0735bc","99"]`, 5)
insertTx(`["0735bc6d","99aabb"]`, 3)

// Total: AQUA appears as "07" (10×), "0735bc" (5×), "0735bc6d" (3×) = 18 total
// Total: BETA appears as "99aa" (10×), "99" (5×), "99aabb" (3×) = 18 total
// After dedup, each should appear ONCE with count=18.

db, err := OpenDB(dbPath)
if err != nil {
t.Fatal(err)
}
|
||||
defer db.conn.Close()
|
||||
|
||||
store := NewPacketStore(db, &PacketStoreConfig{MaxMemoryMB: 100})
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
result := store.computeAnalyticsTopology("", TimeWindow{})
|
||||
topRepeaters := result["topRepeaters"].([]map[string]interface{})
|
||||
|
||||
// Build a map of pubkey → total count from topRepeaters
|
||||
pubkeyCounts := map[string]int{}
|
||||
for _, entry := range topRepeaters {
|
||||
pk, _ := entry["pubkey"].(string)
|
||||
if pk == "" {
|
||||
continue
|
||||
}
|
||||
pubkeyCounts[pk] += entry["count"].(int)
|
||||
}
|
||||
|
||||
// Each pubkey should appear exactly once in topRepeaters
|
||||
aquaEntries := 0
|
||||
betaEntries := 0
|
||||
for _, entry := range topRepeaters {
|
||||
pk, _ := entry["pubkey"].(string)
|
||||
if pk == "0735bc6dda4d1122aabbccdd" {
|
||||
aquaEntries++
|
||||
}
|
||||
if pk == "99aabb001122334455667788" {
|
||||
betaEntries++
|
||||
}
|
||||
}
|
||||
|
||||
if aquaEntries != 1 {
|
||||
t.Errorf("AQUA should appear exactly once in topRepeaters after dedup, got %d entries", aquaEntries)
|
||||
for _, e := range topRepeaters {
|
||||
t.Logf(" entry: hop=%v name=%v pubkey=%v count=%v", e["hop"], e["name"], e["pubkey"], e["count"])
|
||||
}
|
||||
}
|
||||
if betaEntries != 1 {
|
||||
t.Errorf("BETA should appear exactly once in topRepeaters after dedup, got %d entries", betaEntries)
|
||||
}
|
||||
|
||||
// Check that the merged count is correct (18 each)
|
||||
if c := pubkeyCounts["0735bc6dda4d1122aabbccdd"]; c != 18 {
|
||||
t.Errorf("AQUA total count should be 18, got %d", c)
|
||||
}
|
||||
if c := pubkeyCounts["99aabb001122334455667788"]; c != 18 {
|
||||
t.Errorf("BETA total count should be 18, got %d", c)
|
||||
}
|
||||
}
|
||||
|
||||
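Aside: the merge rule these tests pin down — a hop prefix folds into a node only when exactly one known pubkey matches it — can be stated compactly. A minimal sketch, assuming lowercase hex pubkeys; resolveHopPrefix is a hypothetical helper illustrating the rule, not the function under test:

package main

import "strings"

// resolveHopPrefix returns the full pubkey a hop prefix denotes, or "" when
// the prefix is ambiguous (multiple matches) or unknown (no match). Only an
// unambiguous match may be merged into a pubkey-keyed topology entry; an
// ambiguous or unknown hop stays as its own raw-prefix entry.
func resolveHopPrefix(hop string, pubkeys []string) string {
    match := ""
    for _, pk := range pubkeys {
        if strings.HasPrefix(pk, hop) {
            if match != "" {
                return "" // second match: ambiguous, keep the raw hop entry
            }
            match = pk
        }
    }
    return match
}
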
// TestTopologyDedup_AmbiguousPrefixNotMerged verifies that ambiguous short
// prefixes (matching multiple nodes) are NOT merged — they stay separate.
func TestTopologyDedup_AmbiguousPrefixNotMerged(t *testing.T) {
    dir := t.TempDir()
    dbPath := filepath.Join(dir, "test.db")
    conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
    if err != nil {
        t.Fatal(err)
    }
    defer conn.Close()

    exec := func(s string) {
        if _, err := conn.Exec(s); err != nil {
            t.Fatalf("SQL exec failed: %v\nSQL: %s", err, s)
        }
    }
    exec(`CREATE TABLE transmissions (
        id INTEGER PRIMARY KEY, raw_hex TEXT, hash TEXT, first_seen TEXT,
        route_type INTEGER, payload_type INTEGER, payload_version INTEGER, decoded_json TEXT
    )`)
    exec(`CREATE TABLE observations (
        id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
        direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT
    )`)
    exec(`CREATE TABLE observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
    exec(`CREATE TABLE nodes (
        public_key TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL,
        last_seen TEXT, frequency REAL
    )`)
    exec(`CREATE TABLE schema_version (version INTEGER)`)
    exec(`INSERT INTO schema_version (version) VALUES (1)`)
    exec(`CREATE INDEX idx_tx_first_seen ON transmissions(first_seen)`)

    // Two nodes whose pubkeys share the prefix "ab" — collision!
    exec(`INSERT INTO nodes (public_key, name, role) VALUES ('ab11223344556677aabbccdd', 'NODE_A', 'Repeater')`)
    exec(`INSERT INTO nodes (public_key, name, role) VALUES ('ab99887766554433aabbccdd', 'NODE_B', 'Repeater')`)

    base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
    txID := 1
    obsID := 1

    // 10 packets with hop "ab" — ambiguous (matches both NODE_A and NODE_B)
    for i := 0; i < 10; i++ {
        ts := base.Add(time.Duration(txID) * time.Minute).Format(time.RFC3339)
        hash := fmt.Sprintf("h%04d", txID)
        conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, 0, 4, 1, ?)",
            txID, "aabb", hash, ts, fmt.Sprintf(`{"pubKey":"pk%04d"}`, txID))
        conn.Exec("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            obsID, txID, "obs1", "Obs1", "RX", -10.0, -80.0, 5, `["ab"]`, ts)
        txID++
        obsID++
    }
    // 5 packets with hop "ab1122" — unambiguous (only NODE_A)
    for i := 0; i < 5; i++ {
        ts := base.Add(time.Duration(txID) * time.Minute).Format(time.RFC3339)
        hash := fmt.Sprintf("h%04d", txID)
        conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, 0, 4, 1, ?)",
            txID, "aabb", hash, ts, fmt.Sprintf(`{"pubKey":"pk%04d"}`, txID))
        conn.Exec("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            obsID, txID, "obs1", "Obs1", "RX", -10.0, -80.0, 5, `["ab1122"]`, ts)
        txID++
        obsID++
    }

    db, err := OpenDB(dbPath)
    if err != nil {
        t.Fatal(err)
    }
    defer db.conn.Close()

    store := NewPacketStore(db, &PacketStoreConfig{MaxMemoryMB: 100})
    if err := store.Load(); err != nil {
        t.Fatal(err)
    }

    result := store.computeAnalyticsTopology("", TimeWindow{})
    topRepeaters := result["topRepeaters"].([]map[string]interface{})

    // "ab" is ambiguous — it should NOT be merged with "ab1122".
    // We expect two separate entries: one for "ab" (count=10) and one for "ab1122" (count=5).
    foundAb := false
    foundAb1122 := false
    for _, entry := range topRepeaters {
        hop := entry["hop"].(string)
        count := entry["count"].(int)
        if hop == "ab" {
            foundAb = true
            if count != 10 {
                t.Errorf("ambiguous hop 'ab' should have count=10, got %d", count)
            }
        }
        if hop == "ab1122" {
            foundAb1122 = true
            if count != 5 {
                t.Errorf("unambiguous hop 'ab1122' should have count=5, got %d", count)
            }
        }
    }
    if !foundAb {
        t.Error("ambiguous hop 'ab' should remain as separate entry")
    }
    if !foundAb1122 {
        t.Error("unambiguous hop 'ab1122' should remain as separate entry (not merged with ambiguous 'ab')")
    }
}

// TestTopologyDedup_PairsMergeByPubkey verifies that topPairs merges
// pair entries whose hops resolve unambiguously to the same node pair.
func TestTopologyDedup_PairsMergeByPubkey(t *testing.T) {
    dir := t.TempDir()
    dbPath := filepath.Join(dir, "test.db")
    conn, err := sql.Open("sqlite", dbPath+"?_journal_mode=WAL")
    if err != nil {
        t.Fatal(err)
    }
    defer conn.Close()

    exec := func(s string) {
        if _, err := conn.Exec(s); err != nil {
            t.Fatalf("SQL exec failed: %v\nSQL: %s", err, s)
        }
    }
    exec(`CREATE TABLE transmissions (
        id INTEGER PRIMARY KEY, raw_hex TEXT, hash TEXT, first_seen TEXT,
        route_type INTEGER, payload_type INTEGER, payload_version INTEGER, decoded_json TEXT
    )`)
    exec(`CREATE TABLE observations (
        id INTEGER PRIMARY KEY, transmission_id INTEGER, observer_id TEXT, observer_name TEXT,
        direction TEXT, snr REAL, rssi REAL, score INTEGER, path_json TEXT, timestamp TEXT, raw_hex TEXT
    )`)
    exec(`CREATE TABLE observers (rowid INTEGER PRIMARY KEY, id TEXT, name TEXT)`)
    exec(`CREATE TABLE nodes (
        public_key TEXT PRIMARY KEY, name TEXT, role TEXT, lat REAL, lon REAL,
        last_seen TEXT, frequency REAL
    )`)
    exec(`CREATE TABLE schema_version (version INTEGER)`)
    exec(`INSERT INTO schema_version (version) VALUES (1)`)
    exec(`CREATE INDEX idx_tx_first_seen ON transmissions(first_seen)`)

    exec(`INSERT INTO nodes (public_key, name, role) VALUES ('0735bc6dda4d1122aabbccdd', 'AQUA', 'Repeater')`)
    exec(`INSERT INTO nodes (public_key, name, role) VALUES ('99aabb001122334455667788', 'BETA', 'Repeater')`)

    base := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
    txID := 1
    obsID := 1
    insertTx := func(path string, count int) {
        for i := 0; i < count; i++ {
            ts := base.Add(time.Duration(txID) * time.Minute).Format(time.RFC3339)
            hash := fmt.Sprintf("h%04d", txID)
            conn.Exec("INSERT INTO transmissions (id, raw_hex, hash, first_seen, route_type, payload_type, payload_version, decoded_json) VALUES (?, ?, ?, ?, 0, 4, 1, ?)",
                txID, "aabb", hash, ts, fmt.Sprintf(`{"pubKey":"pk%04d"}`, txID))
            conn.Exec("INSERT INTO observations (id, transmission_id, observer_id, observer_name, direction, snr, rssi, score, path_json, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                obsID, txID, "obs1", "Obs1", "RX", -10.0, -80.0, 5, path, ts)
            txID++
            obsID++
        }
    }

    // Path ["07","99aa"] → pair key "07|99aa", 10 times.
    // Path ["0735bc","99"] → pair key "0735bc|99", 5 times. Pair keys are
    // ordered by string comparison ("07" < "99aa", "0735bc" < "99"), so both
    // keys are already sorted.
    // After dedup both should merge into one AQUA|BETA pair with count=15.
    insertTx(`["07","99aa"]`, 10)
    insertTx(`["0735bc","99"]`, 5)

    db, err := OpenDB(dbPath)
    if err != nil {
        t.Fatal(err)
    }
    defer db.conn.Close()

    store := NewPacketStore(db, &PacketStoreConfig{MaxMemoryMB: 100})
    if err := store.Load(); err != nil {
        t.Fatal(err)
    }

    result := store.computeAnalyticsTopology("", TimeWindow{})
    topPairs := result["topPairs"].([]map[string]interface{})

    // Should have exactly 1 pair entry for AQUA-BETA with count=15.
    aquaBetaPairs := 0
    totalCount := 0
    for _, entry := range topPairs {
        pkA, _ := entry["pubkeyA"].(string)
        pkB, _ := entry["pubkeyB"].(string)
        if (pkA == "0735bc6dda4d1122aabbccdd" && pkB == "99aabb001122334455667788") ||
            (pkA == "99aabb001122334455667788" && pkB == "0735bc6dda4d1122aabbccdd") {
            aquaBetaPairs++
            totalCount += entry["count"].(int)
        }
    }

    if aquaBetaPairs != 1 {
        t.Errorf("AQUA-BETA pair should appear exactly once after dedup, got %d entries", aquaBetaPairs)
        for _, e := range topPairs {
            t.Logf("  pair: hopA=%v hopB=%v count=%v pkA=%v pkB=%v", e["hopA"], e["hopB"], e["count"], e["pubkeyA"], e["pubkeyB"])
        }
    }
    if totalCount != 15 {
        t.Errorf("AQUA-BETA pair total count should be 15, got %d", totalCount)
    }
}
@@ -859,6 +859,7 @@ type ObserverResp struct {
    BatteryMv       interface{} `json:"battery_mv"`
    UptimeSecs      interface{} `json:"uptime_secs"`
    NoiseFloor      interface{} `json:"noise_floor"`
    LastPacketAt    interface{} `json:"last_packet_at"`
+   PacketsLastHour int         `json:"packetsLastHour"`
    Lat             interface{} `json:"lat"`
    Lon             interface{} `json:"lon"`
@@ -37,12 +37,11 @@ func checkAutoVacuum(db *DB, cfg *Config, dbPath string) {
    log.Printf("[db] vacuumOnStartup=true — starting one-time full VACUUM (ensure 2x DB size free disk space)...")
    start := time.Now()

-   rw, err := openRW(dbPath)
+   rw, err := cachedRW(dbPath)
    if err != nil {
        log.Printf("[db] VACUUM failed: could not open RW connection: %v", err)
        return
    }
    defer rw.Close()

    if _, err := rw.Exec("PRAGMA auto_vacuum = INCREMENTAL"); err != nil {
        log.Printf("[db] VACUUM failed: could not set auto_vacuum: %v", err)
@@ -71,12 +70,11 @@ func checkAutoVacuum(db *DB, cfg *Config, dbPath string) {
// runIncrementalVacuum runs PRAGMA incremental_vacuum(N) on a read-write
// connection. Safe to call on auto_vacuum=NONE databases (noop).
func runIncrementalVacuum(dbPath string, pages int) {
-   rw, err := openRW(dbPath)
+   rw, err := cachedRW(dbPath)
    if err != nil {
        log.Printf("[vacuum] could not open RW connection: %v", err)
        return
    }
    defer rw.Close()

    if _, err := rw.Exec(fmt.Sprintf("PRAGMA incremental_vacuum(%d)", pages)); err != nil {
        log.Printf("[vacuum] incremental_vacuum error: %v", err)
+22 -6
@@ -3,6 +3,8 @@
  "apiKey": "your-secret-api-key-here",
  "nodeBlacklist": [],
  "_comment_nodeBlacklist": "Public keys of nodes to hide from all API responses. Use for trolls, offensive names, or nodes reporting false data that operators refuse to fix.",
+ "observerIATAWhitelist": [],
+ "_comment_observerIATAWhitelist": "Global IATA region whitelist. When non-empty, only observers whose IATA code (from MQTT topic) matches are processed. Case-insensitive. Empty = allow all. Unlike per-source iataFilter, this applies across all MQTT sources.",
  "retention": {
    "nodeDays": 7,
    "observerDays": 14,
@@ -14,6 +16,7 @@
    "incrementalVacuumPages": 1024,
    "_comment": "vacuumOnStartup: run one-time full VACUUM to enable incremental auto-vacuum on existing DBs (blocks startup for minutes on large DBs; requires 2x DB file size in free disk space). incrementalVacuumPages: free pages returned to OS after each retention reaper cycle (default 1024). See #919."
  },
+ "_comment_ingestorStats": "Ingestor publishes a 1-Hz stats snapshot consumed by the server's /api/perf/io and /api/perf/write-sources endpoints (#1120). Path is configured via the CORESCOPE_INGESTOR_STATS environment variable on the INGESTOR process. Default: /tmp/corescope-ingestor-stats.json. The writer uses O_NOFOLLOW + 0o600, so a pre-planted symlink in /tmp cannot be used to clobber an arbitrary file. SECURITY: in shared-tmp environments (multi-tenant hosts), point CORESCOPE_INGESTOR_STATS at a private directory like /var/lib/corescope/ingestor-stats.json that only the corescope user can write to.",
  "https": {
    "cert": "/path/to/cert.pem",
    "key": "/path/to/key.pem",
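Aside: the O_NOFOLLOW + 0o600 writer the ingestorStats comment describes fits in a few lines. A minimal Go sketch, assuming Linux; writeStatsFile is a hypothetical name, not the ingestor's actual function:

package main

import (
    "os"
    "syscall"
)

// writeStatsFile refuses to traverse a symlink at path (O_NOFOLLOW), so a
// pre-planted link in a shared /tmp cannot redirect the write to an
// arbitrary file, and it creates the file 0o600 so other local users
// cannot read the snapshot.
func writeStatsFile(path string, snapshot []byte) error {
    f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|syscall.O_NOFOLLOW, 0o600)
    if err != nil {
        return err // ELOOP here means a symlink was planted at path
    }
    defer f.Close()
    _, err = f.Write(snapshot)
    return err
}
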
@@ -129,7 +132,9 @@
        "SFO",
        "OAK",
        "MRY"
-     ]
+     ],
+     "region": "SJC",
      "connectTimeoutSec": 45
    }
  ],
  "channelKeys": {
@@ -151,7 +156,8 @@
    "infraSilentHours": 72,
    "nodeDegradedHours": 1,
    "nodeSilentHours": 24,
-   "_comment": "How long (hours) before nodes show as degraded/silent. 'infra' = repeaters & rooms, 'node' = companions & others."
+   "relayActiveHours": 24,
+   "_comment": "How long (hours) before nodes show as degraded/silent. 'infra' = repeaters & rooms, 'node' = companions & others. relayActiveHours: a repeater is shown as 'actively relaying' if its pubkey appeared as a path hop in a non-advert packet within this window (issue #662)."
  },
  "defaultRegion": "SJC",
  "mapDefaults": {
@@ -169,7 +175,11 @@
      [37.20, -122.52]
    ],
    "bufferKm": 20,
-   "_comment": "Optional. Restricts ingestion and API responses to nodes within the polygon + bufferKm. Polygon is an array of [lat, lon] pairs (minimum 3). Use tools/geofilter-builder.html to draw a polygon visually. Remove this section to disable filtering. Nodes with no GPS fix are always allowed through."
+   "_comment": "Optional. Restricts ingestion and API responses to nodes within the polygon + bufferKm. Polygon is an array of [lat, lon] pairs (minimum 3). Use the GeoFilter Builder (`/geofilter-builder.html`) to draw a polygon, save drafts to localStorage with Save Draft, and export a config snippet with Download — paste the snippet here as the `geo_filter` block. Remove this section to disable filtering. Nodes with no GPS fix are always allowed through."
  },
+ "foreignAdverts": {
+   "mode": "flag",
+   "_comment": "Controls how the ingestor handles ADVERTs whose GPS is OUTSIDE the geo_filter polygon (#730). 'flag' (default): store the advert/node and tag it foreign_advert=1 so operators can see bridged/leaked nodes via the API ('foreign': true on /api/nodes). 'drop': legacy behavior — silently discard the advert (no log, no node row). Only applies when geo_filter is configured; otherwise has no effect."
+ },
  "regions": {
    "SJC": "San Jose, US",
@@ -214,7 +224,8 @@
    "maxMemoryMB": 1024,
    "estimatedPacketBytes": 450,
    "retentionHours": 168,
-   "_comment": "In-memory packet store. maxMemoryMB caps RAM usage. retentionHours: only packets younger than this are loaded on startup and kept in memory (0 = unlimited, not recommended for large DBs — causes OOM on cold start). 168 = 7 days. Must be ≤ retention.packetDays * 24."
+   "_comment": "In-memory packet store. maxMemoryMB caps RAM usage. retentionHours: only packets younger than this are loaded on startup and kept in memory (0 = unlimited, not recommended for large DBs — causes OOM on cold start). 168 = 7 days. Must be ≤ retention.packetDays * 24.",
+   "_comment_gomemlimit": "On startup the server reads GOMEMLIMIT from the environment if set; otherwise it derives a Go runtime soft memory limit of maxMemoryMB * 1.5 and applies it via debug.SetMemoryLimit. This forces aggressive GC under cgroup pressure so the process self-throttles before the kernel SIGKILLs it. To override, set GOMEMLIMIT explicitly (e.g. GOMEMLIMIT=850MiB). See issue #836."
  },
  "resolvedPath": {
    "backfillHours": 24,
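Aside: the derivation `_comment_gomemlimit` describes is a small startup hook. A minimal Go sketch using the 1.5× factor from the comment; applyMemoryLimit is an illustrative name, not the server's actual symbol:

package main

import (
    "os"
    "runtime/debug"
)

// applyMemoryLimit honors an explicit GOMEMLIMIT (which the Go runtime
// already reads at startup), otherwise derives a soft limit of
// maxMemoryMB * 1.5 so the GC works progressively harder as the heap
// approaches the cap instead of letting the kernel OOM-kill the process.
func applyMemoryLimit(maxMemoryMB int) {
    if os.Getenv("GOMEMLIMIT") != "" {
        return // respect the operator's explicit override
    }
    limit := int64(float64(maxMemoryMB) * 1.5 * 1024 * 1024)
    debug.SetMemoryLimit(limit)
}
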
@@ -224,10 +235,15 @@
    "maxAgeDays": 5,
    "_comment": "Neighbor edges older than this many days are pruned on startup and daily. Default: 5."
  },
- "_comment_mqttSources": "Each source connects to an MQTT broker. topics: what to subscribe to. iataFilter: only ingest packets from these regions (optional).",
+ "batteryThresholds": {
+   "lowMv": 3300,
+   "criticalMv": 3000,
+   "_comment": "Voltage cutoffs (millivolts) for the per-node battery trend chart on /node-analytics. Latest sample below lowMv shows the node as ⚠️ Low; below criticalMv shows 🪫 Critical. Both default to 3300 / 3000 if omitted. Source data: observer_metrics.battery_mv populated from observer status messages; only nodes that are themselves observers (matching pubkey ↔ observer id) yield a series. Issue #663."
+ },
+ "_comment_mqttSources": "Each source connects to an MQTT broker. topics: what to subscribe to. iataFilter: only ingest packets from these regions (optional). region: default IATA region for this source — used when packet/topic doesn't specify one (optional, priority: payload > topic > this field).",
  "_comment_channelKeys": "Hex keys for decrypting channel messages. Key name = channel display name. public channel key is well-known.",
  "_comment_hashChannels": "Channel names whose keys are derived via SHA256. Key = SHA256(name)[:16]. Listed here so the ingestor can auto-derive keys.",
  "_comment_defaultRegion": "IATA code shown by default in region filters.",
  "_comment_mapDefaults": "Initial map center [lat, lon] and zoom level.",
- "_comment_regions": "IATA code to display name mapping. Packets are tagged with region codes by MQTT topic structure."
+ "_comment_regions": "IATA code → display name mapping for the region filter UI. Each key is a 3-letter IATA code that an observer is tagged with (resolved priority: MQTT payload `region` field > topic-derived region > mqttSources.region). Observers without an IATA tag will not appear under any region filter — only under 'All Regions'. The region filter dropdown shows one entry per code listed here PLUS any extra IATA codes the server discovers from observers at runtime (so you can omit codes here and they will still be selectable, just labelled with the bare IATA code instead of a friendly name). Selecting 'All Regions' (or no region) returns results from every observer including those with no IATA tag; selecting one or more codes restricts results to packets observed by observers tagged with those codes. The reserved value 'All' (case-insensitive) is treated as 'no filter' on the server, so the URL ?region=All behaves identically to omitting the param. Issue #770."
}
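Aside: `_comment_mqttSources` and `_comment_regions` both state the same resolution order for an observer's IATA region. A minimal Go sketch of that priority chain; resolveRegion and its arguments are illustrative names, not the ingestor's actual API:

package main

// resolveRegion picks an observer's IATA code using the documented priority:
// explicit `region` field in the MQTT payload, else the region segment
// derived from the topic, else the per-source default from mqttSources.region.
// Returns "" when none is set; such observers only appear under 'All Regions'.
func resolveRegion(payloadRegion, topicRegion, sourceRegion string) string {
    if payloadRegion != "" {
        return payloadRegion
    }
    if topicRegion != "" {
        return topicRegion
    }
    return sourceRegion
}
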
@@ -0,0 +1,17 @@
// Package dbconfig provides the shared DBConfig struct used by both the server
// and ingestor binaries for SQLite vacuum and maintenance settings (#919, #921).
package dbconfig

// DBConfig controls SQLite vacuum and maintenance behavior (#919).
type DBConfig struct {
    VacuumOnStartup        bool `json:"vacuumOnStartup"`        // one-time full VACUUM on startup if auto_vacuum is not INCREMENTAL
    IncrementalVacuumPages int  `json:"incrementalVacuumPages"` // pages returned to OS per reaper cycle (default 1024)
}

// GetIncrementalVacuumPages returns the configured pages or 1024 default.
func (c *DBConfig) GetIncrementalVacuumPages() int {
    if c != nil && c.IncrementalVacuumPages > 0 {
        return c.IncrementalVacuumPages
    }
    return 1024
}
@@ -0,0 +1,21 @@
package dbconfig

import "testing"

func TestGetIncrementalVacuumPages_Default(t *testing.T) {
    var c *DBConfig
    if got := c.GetIncrementalVacuumPages(); got != 1024 {
        t.Fatalf("nil DBConfig: got %d, want 1024", got)
    }
    c = &DBConfig{}
    if got := c.GetIncrementalVacuumPages(); got != 1024 {
        t.Fatalf("zero DBConfig: got %d, want 1024", got)
    }
}

func TestGetIncrementalVacuumPages_Configured(t *testing.T) {
    c := &DBConfig{IncrementalVacuumPages: 512}
    if got := c.GetIncrementalVacuumPages(); got != 512 {
        t.Fatalf("got %d, want 512", got)
    }
}
@@ -0,0 +1,3 @@
module github.com/meshcore-analyzer/dbconfig

go 1.22
@@ -0,0 +1,3 @@
module github.com/meshcore-analyzer/perfio

go 1.22
@@ -0,0 +1,79 @@
// Package perfio holds the canonical PerfIOSample type shared between the
// ingestor (which publishes /proc/self/io rate samples to its on-disk stats
// file) and the server (which reads that file and surfaces the sample under
// /api/perf/io's `ingestor` block). Sharing the type prevents silent JSON
// contract drift if a field is added on one side only.
//
// The /proc/self/io key:value parser also lives here (Carmack #1167
// must-fix #7) so the two binaries don't carry divergent copies of the
// same parser — past divergence already produced a real bug (see must-fix
// #6: the parsedAny empty-key gate was added on one side only).
package perfio

import (
    "bufio"
    "strconv"
    "strings"
)

// Sample is the per-process I/O rate sample written by the ingestor and
// consumed by the server. Field names + json tags MUST be considered the
// stable on-disk contract — adding/renaming a field is a breaking change.
type Sample struct {
    ReadBytesPerSec           float64 `json:"readBytesPerSec"`
    WriteBytesPerSec          float64 `json:"writeBytesPerSec"`
    CancelledWriteBytesPerSec float64 `json:"cancelledWriteBytesPerSec"`
    SyscallsRead              float64 `json:"syscallsRead"`
    SyscallsWrite             float64 `json:"syscallsWrite"`
    SampledAt                 string  `json:"sampledAt,omitempty"`
}

// Counters is the raw /proc/self/io counter snapshot. Both the ingestor's
// procIOSnapshot and the server's procIOSample are thin wrappers around
// these fields plus a sampled-at timestamp; the parser populates Counters
// directly so there's exactly ONE implementation of the key:value walker.
type Counters struct {
    ReadBytes           int64
    WriteBytes          int64
    CancelledWriteBytes int64
    SyscR               int64
    SyscW               int64
}

// ParseProcIO reads /proc/self/io-shaped key:value lines from sc and
// populates c. Returns true iff at least one recognised key was
// successfully parsed (Carmack must-fix #6 — empty / no-known-keys input
// must NOT be treated as a valid sample, otherwise the next tick computes
// a phantom delta against zero counters).
func ParseProcIO(sc *bufio.Scanner, c *Counters) bool {
    parsedAny := false
    for sc.Scan() {
        parts := strings.SplitN(sc.Text(), ":", 2)
        if len(parts) != 2 {
            continue
        }
        key := strings.TrimSpace(parts[0])
        val, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64)
        if err != nil {
            continue
        }
        switch key {
        case "read_bytes":
            c.ReadBytes = val
            parsedAny = true
        case "write_bytes":
            c.WriteBytes = val
            parsedAny = true
        case "cancelled_write_bytes":
            c.CancelledWriteBytes = val
            parsedAny = true
        case "syscr":
            c.SyscR = val
            parsedAny = true
        case "syscw":
            c.SyscW = val
            parsedAny = true
        }
    }
    return parsedAny
}
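Aside: a usage sketch for the shared parser, showing how a caller might take 1-Hz snapshots and turn the counter deltas into per-second rates; the loop is illustrative, not the ingestor's actual sampler. Note the first-snapshot guard, which is exactly the phantom-delta-against-zero hazard the doc comment calls out:

package main

import (
    "bufio"
    "fmt"
    "os"
    "time"

    "github.com/meshcore-analyzer/perfio"
)

func main() {
    var prev perfio.Counters
    first := true
    for {
        f, err := os.Open("/proc/self/io")
        if err != nil {
            return // not Linux, or procfs unavailable
        }
        var cur perfio.Counters
        ok := perfio.ParseProcIO(bufio.NewScanner(f), &cur)
        f.Close()
        if ok {
            if !first {
                // One-second tick, so the byte deltas are per-second rates.
                fmt.Printf("write: %d B/s  read: %d B/s\n",
                    cur.WriteBytes-prev.WriteBytes, cur.ReadBytes-prev.ReadBytes)
            }
            first = false
            prev = cur
        }
        time.Sleep(time.Second)
    }
}
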
+308 -24
@@ -4,7 +4,29 @@
(function () {
  let _analyticsData = {};
  const sf = (v, d) => (v != null ? v.toFixed(d) : '–'); // safe toFixed
- function esc(s) { return s ? String(s).replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;').replace(/"/g,'&quot;') : ''; }
+ function esc(s) { return s ? String(s).replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;').replace(/"/g,'&quot;').replace(/'/g,'&#39;') : ''; }

  // #1085 — Roles tab helpers (hoisted from renderRolesTab so they're not
  // re-allocated per render).
  function _rolesEmoji(role) {
    if (window.ROLE_EMOJI && window.ROLE_EMOJI[role]) return window.ROLE_EMOJI[role];
    return '•';
  }
  function _rolesFmtSec(v) {
    if (!v && v !== 0) return '—';
    var abs = Math.abs(v);
    if (abs < 1) return v.toFixed(2) + 's';
    if (abs < 60) return v.toFixed(1) + 's';
    if (abs < 3600) return (v / 60).toFixed(1) + 'm';
    if (abs < 86400) return (v / 3600).toFixed(1) + 'h';
    return (v / 86400).toFixed(1) + 'd';
  }
  // #1085 — auto-refresh timer for the Roles tab. Started when the Roles
  // tab is rendered, cleared on tab switch and destroy.
  var _rolesRefreshTimer = null;
  function _stopRolesRefresh() {
    if (_rolesRefreshTimer) { clearInterval(_rolesRefreshTimer); _rolesRefreshTimer = null; }
  }

  // --- Status color helpers (read from CSS variables for theme support) ---
  function cssVar(name) { return getComputedStyle(document.documentElement).getPropertyValue(name).trim(); }
@@ -75,6 +97,16 @@
      <h2>📊 Mesh Analytics</h2>
      <p class="text-muted">Deep dive into your mesh network data</p>
      <div id="analyticsRegionFilter" class="region-filter-container"></div>
      <div class="time-window-filter" style="margin:8px 0">
        <label for="analyticsTimeWindow" style="font-size:0.9em;color:var(--text-muted);margin-right:6px">Time window:</label>
        <select id="analyticsTimeWindow" data-testid="analytics-time-window" aria-label="Time window">
          <option value="">All data</option>
          <option value="1h">Last 1 hour</option>
          <option value="24h">Last 24 hours</option>
          <option value="7d">Last 7 days</option>
          <option value="30d">Last 30 days</option>
        </select>
      </div>
      <div class="analytics-tabs" id="analyticsTabs" role="tablist" aria-label="Analytics tabs">
        <button class="tab-btn active" data-tab="overview">Overview</button>
        <button class="tab-btn" data-tab="rf">RF / Signal</button>
@@ -88,6 +120,10 @@
        <button class="tab-btn" data-tab="neighbor-graph">Neighbor Graph</button>
        <button class="tab-btn" data-tab="rf-health">RF Health</button>
        <button class="tab-btn" data-tab="clock-health">Clock Health</button>
        <!-- #1085 — Roles tab folded in from former /#/roles standalone page.
             Placed after Clock Health (clock-skew posture is shown per-role
             inside this tab) and before Prefix Tool (utility tabs trail). -->
        <button class="tab-btn" data-tab="roles">Roles</button>
        <button class="tab-btn" data-tab="prefix-tool">Prefix Tool</button>
      </div>
    </div>
@@ -99,18 +135,40 @@
    // Tab handling
    const analyticsTabs = document.getElementById('analyticsTabs');
    initTabBar(analyticsTabs);
    // #749 — keep analytics tab + window in URL for deep-linking.
    function _updateAnalyticsUrl() {
      if (!window.URLState) return;
      var twElNow = document.getElementById('analyticsTimeWindow');
      var updates = {
        tab: _currentTab && _currentTab !== 'overview' ? _currentTab : '',
        window: twElNow && twElNow.value ? twElNow.value : ''
      };
      // Drop any subview-specific keys that don't belong to the active tab
      // so switching tabs gives a clean URL. (rf-health uses 'range', 'observer', 'from', 'to')
      if (_currentTab !== 'rf-health') {
        var cleared = ['range', 'observer', 'from', 'to'];
        for (var i = 0; i < cleared.length; i++) updates[cleared[i]] = '';
      }
      var newHash = URLState.updateHashParams(updates, location.hash);
      if (newHash !== location.hash) history.replaceState(null, '', newHash);
    }

    analyticsTabs.addEventListener('click', e => {
      const btn = e.target.closest('.tab-btn');
      if (!btn) return;
      document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
      btn.classList.add('active');
      _currentTab = btn.dataset.tab;
      // #1085 — Roles tab owns its own 60s auto-refresh; stop it on switch.
      if (_currentTab !== 'roles') _stopRolesRefresh();
      _updateAnalyticsUrl();
      renderTab(_currentTab);
    });

-   // Deep-link: #/analytics?tab=collisions
+   // Deep-link: #/analytics?tab=collisions&window=7d
    const hashParams = location.hash.split('?')[1] || '';
-   const urlTab = new URLSearchParams(hashParams).get('tab');
+   const _ap = new URLSearchParams(hashParams);
+   const urlTab = _ap.get('tab');
    if (urlTab) {
      const tabBtn = analyticsTabs.querySelector(`[data-tab="${urlTab}"]`);
      if (tabBtn) {
@@ -119,10 +177,22 @@
        _currentTab = urlTab;
      }
    }
    // #749 — restore time window from URL.
    const urlWindow = _ap.get('window');
    if (urlWindow) {
      const twInit = document.getElementById('analyticsTimeWindow');
      if (twInit) twInit.value = urlWindow;
    }

    RegionFilter.init(document.getElementById('analyticsRegionFilter'));
    RegionFilter.onChange(function () { loadAnalytics(); });

    // Time-window picker (#842) — refresh analytics on change.
    const tw = document.getElementById('analyticsTimeWindow');
    if (tw) {
      tw.addEventListener('change', function () { _updateAnalyticsUrl(); loadAnalytics(); });
    }

    // Delegated click/keyboard handler for clickable table rows
    const analyticsContent = document.getElementById('analyticsContent');
    if (analyticsContent) {
@@ -150,14 +220,24 @@
  async function loadAnalytics() {
    try {
      _analyticsData = {};
-     const rqs = RegionFilter.regionQueryString();
-     const sep = rqs ? '?' + rqs.slice(1) : '';
+     const rqs = RegionFilter.regionQueryString(); // "&region=..." or ""
+     // Time window picker (#842) — append &window=… when set.
+     // NOTE: only the three window-aware endpoints (rf/topology/channels)
+     // receive ?window=…; hash-sizes and hash-collisions are about node
+     // identity / hash-byte distribution and intentionally span all data.
+     const twEl = document.getElementById('analyticsTimeWindow');
+     const twVal = twEl ? twEl.value : '';
+     const tws = twVal ? '&window=' + encodeURIComponent(twVal) : '';
+     const baseQS = rqs.slice(1); // drop leading '&', "" or "region=…"
+     const sepBase = baseQS ? '?' + baseQS : '';
+     const windowedQS = (rqs + tws).slice(1);
+     const sepWin = windowedQS ? '?' + windowedQS : '';
      const [hashData, rfData, topoData, chanData, collisionData] = await Promise.all([
-       api('/analytics/hash-sizes' + sep, { ttl: CLIENT_TTL.analyticsRF }),
-       api('/analytics/rf' + sep, { ttl: CLIENT_TTL.analyticsRF }),
-       api('/analytics/topology' + sep, { ttl: CLIENT_TTL.analyticsRF }),
-       api('/analytics/channels' + sep, { ttl: CLIENT_TTL.analyticsRF }),
-       api('/analytics/hash-collisions' + sep, { ttl: CLIENT_TTL.analyticsRF }),
+       api('/analytics/hash-sizes' + sepBase, { ttl: CLIENT_TTL.analyticsRF }),
+       api('/analytics/rf' + sepWin, { ttl: CLIENT_TTL.analyticsRF }),
+       api('/analytics/topology' + sepWin, { ttl: CLIENT_TTL.analyticsRF }),
+       api('/analytics/channels' + sepWin, { ttl: CLIENT_TTL.analyticsRF }),
+       api('/analytics/hash-collisions' + sepBase, { ttl: CLIENT_TTL.analyticsRF }),
      ]);
      _analyticsData = { hashData, rfData, topoData, chanData, collisionData };
      renderTab(_currentTab);
@@ -183,6 +263,7 @@
      case 'neighbor-graph': await renderNeighborGraphTab(el); break;
      case 'rf-health': await renderRFHealthTab(el); break;
      case 'clock-health': await renderClockHealthTab(el); break;
      case 'roles': await renderRolesTab(el); break;
      case 'prefix-tool': await renderPrefixTool(el); break;
    }
    // Auto-apply column resizing to all analytics tables
@@ -711,6 +792,7 @@
  // ===================== CHANNELS =====================
  var _channelSortState = null;
  var _channelData = null;
  var _channelRenderGen = 0;
  var CHANNEL_SORT_KEY = 'meshcore-channel-sort';

  function loadChannelSort() {
@@ -721,6 +803,18 @@
    return { col: 'lastActivity', dir: 'desc' };
  }

  // True when the user has explicitly chosen a sort (saved in localStorage).
  // Used by the grouped analytics view to decide whether to apply its own
  // default ("messages desc") instead of the global flat-list default.
  function hasSavedChannelSort() {
    try {
      var s = localStorage.getItem(CHANNEL_SORT_KEY);
      if (!s) return false;
      var p = JSON.parse(s);
      return !!(p && p.col && p.dir);
    } catch (e) { return false; }
  }

  function saveChannelSort(state) {
    try { localStorage.setItem(CHANNEL_SORT_KEY, JSON.stringify(state)); } catch (e) {}
  }
@@ -755,20 +849,107 @@
  }

  function channelRowHtml(c) {
+   var name = c.displayName || c.name || 'Unknown';
    return '<tr class="clickable-row" data-action="navigate" data-value="#/channels?ch=' + c.hash + '" tabindex="0" role="row">' +
-     '<td><strong>' + esc(c.name || 'Unknown') + '</strong></td>' +
+     '<td><strong>' + esc(name) + '</strong></td>' +
      '<td class="mono">' + (typeof c.hash === 'number' ? '0x' + c.hash.toString(16).toUpperCase().padStart(2, '0') : c.hash) + '</td>' +
      '<td>' + c.messages + '</td>' +
      '<td>' + c.senders + '</td>' +
      '<td>' + timeAgo(c.lastActivity) + '</td>' +
-     '<td>' + (c.encrypted ? '🔒' : '✅') + '</td>' +
+     '<td>' + (c.encrypted ? (c.group === 'mine' ? '🔑' : '🔒') : '✅') + '</td>' +
      '</tr>';
  }

  // ── PSK-aware decoration ──────────────────────────────────────────────────
  // Server returns raw "chNNN" placeholder names for encrypted channels it
  // doesn't know. Decorate so the UI shows a useful display name and a
  // group bucket: mine / network / encrypted. Pure function for testability.
  function decorateAnalyticsChannels(channels, hashByteToKeyName, labels) {
    var keyMap = hashByteToKeyName || {};
    var lab = labels || {};
    var out = [];
    for (var i = 0; i < (channels || []).length; i++) {
      var c = channels[i];
      var copy = Object.assign({}, c);
      var hashNum = typeof c.hash === 'number' ? c.hash : parseInt(c.hash, 10);
      var rawName = String(c.name || '');
      var isPlaceholder = /^ch(\d+|\?)$/.test(rawName);
      if (c.encrypted) {
        var keyName = !isNaN(hashNum) ? keyMap[hashNum] : null;
        if (keyName) {
          copy.displayName = lab[keyName] || keyName;
          copy.group = 'mine';
        } else if (isPlaceholder || !rawName) {
          // Placeholder ("chNNN") or empty name → render as opaque encrypted.
          // Empty-name encrypted rows would otherwise leak through with an
          // empty <strong> in the row; force the placeholder rendering.
          copy.displayName = !isNaN(hashNum)
            ? '🔒 Encrypted (0x' + hashNum.toString(16).toUpperCase().padStart(2, '0') + ')'
            : '🔒 Encrypted';
          copy.group = 'encrypted';
        } else {
          // Server gave us a real name (rainbow table hit) for an encrypted ch.
          copy.displayName = rawName;
          copy.group = 'network';
        }
      } else {
        copy.displayName = rawName || 'Unknown';
        copy.group = 'network';
      }
      out.push(copy);
    }
    return out;
  }

  // Build the (hash byte → key name) map from ChannelDecrypt's stored keys.
  // Async because computeChannelHash uses subtle.digest. Returns {} if the
  // module or its keys are unavailable (graceful fallback).
  async function buildHashKeyMap() {
    if (typeof ChannelDecrypt === 'undefined' || !ChannelDecrypt.getStoredKeys) return {};
    var keys = ChannelDecrypt.getStoredKeys();
    var map = {};
    var names = Object.keys(keys || {});
    for (var ni = 0; ni < names.length; ni++) {
      var name = names[ni];
      try {
        var bytes = ChannelDecrypt.hexToBytes(keys[name]);
        var hb = await ChannelDecrypt.computeChannelHash(bytes);
        if (typeof hb === 'number') map[hb] = name;
      } catch (e) { /* skip bad key */ }
    }
    return map;
  }
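Aside: the stored keys that buildHashKeyMap hashes ultimately come from the config's `_comment_hashChannels` rule, key = SHA256(name)[:16]. A minimal Go sketch of that derivation on the ingestor side; deriveHashChannelKey is an illustrative name:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// deriveHashChannelKey derives a 16-byte channel key from the channel name,
// per the documented rule: key = SHA256(name)[:16].
func deriveHashChannelKey(name string) []byte {
    sum := sha256.Sum256([]byte(name))
    return sum[:16]
}

func main() {
    fmt.Println(hex.EncodeToString(deriveHashChannelKey("public")))
}
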
- function channelTbodyHtml(channels, col, dir) {
+ function channelTbodyHtml(channels, col, dir, opts) {
    var sorted = sortChannels(channels, col, dir);
    var parts = [];
-   for (var i = 0; i < sorted.length; i++) parts.push(channelRowHtml(sorted[i]));
+   if (opts && opts.grouped) {
+     // Group by .group: mine → network → encrypted. Inside each group keep
+     // the active sort (caller passes col/dir; for the integration we sort
+     // by messages desc by default).
+     var groups = { mine: [], network: [], encrypted: [] };
+     for (var gi = 0; gi < sorted.length; gi++) {
+       var g = sorted[gi].group || (sorted[gi].encrypted ? 'encrypted' : 'network');
+       (groups[g] || (groups[g] = [])).push(sorted[gi]);
+     }
+     var sections = [
+       { key: 'mine', label: '🔑 My Channels' },
+       { key: 'network', label: '📻 Network' },
+       { key: 'encrypted', label: '🔒 Encrypted' },
+     ];
+     for (var si = 0; si < sections.length; si++) {
+       var rows = groups[sections[si].key] || [];
+       if (!rows.length) continue;
+       parts.push(
+         '<tr class="ch-section-row"><td colspan="6" class="ch-section-header">' +
+         esc(sections[si].label) + ' <span class="text-muted">(' + rows.length + ')</span>' +
+         '</td></tr>'
+       );
+       for (var ri = 0; ri < rows.length; ri++) parts.push(channelRowHtml(rows[ri]));
+     }
+   } else {
+     for (var i = 0; i < sorted.length; i++) parts.push(channelRowHtml(sorted[i]));
+   }
    return parts.join('');
  }
@@ -799,13 +980,39 @@
    var tbody = document.getElementById('channelsTbody');
    var thead = document.querySelector('#channelsTable thead');
    if (!tbody || !_channelData) return;
-   tbody.innerHTML = channelTbodyHtml(_channelData, _channelSortState.col, _channelSortState.dir);
+   tbody.innerHTML = channelTbodyHtml(_channelData, _channelSortState.col, _channelSortState.dir, { grouped: true });
    if (thead) thead.outerHTML = channelTheadHtml(_channelSortState.col, _channelSortState.dir);
  }

  function renderChannels(el, ch) {
-   _channelData = ch.channels;
-   if (!_channelSortState) _channelSortState = loadChannelSort();
+   // Decorate first so grouping/display name reflect locally-stored PSK keys.
+   // buildHashKeyMap is async; render once with a sync best-effort empty map,
+   // then upgrade once keys resolve. That keeps first paint fast and avoids
+   // blocking on subtle.digest in environments where it's slow.
+   var rawChannels = ch.channels || [];
+   // Resolve the persisted sort first so the default-fallback below doesn't
+   // shadow what the user previously chose. Default for the grouped view is
+   // messages desc (matches the PR description); only used when nothing saved.
+   if (!_channelSortState) {
+     _channelSortState = hasSavedChannelSort()
+       ? loadChannelSort()
+       : { col: 'messages', dir: 'desc' };
+   }
+   var ranOnce = false;
+   // Generation token: if renderChannels is called again before
+   // buildHashKeyMap() resolves, the older promise must not clobber the
+   // newer rawChannels / decoration with stale-key data.
+   var myGen = ++_channelRenderGen;
+   function applyDecorate(map) {
+     if (myGen !== _channelRenderGen) return; // superseded
+     var labels = (typeof ChannelDecrypt !== 'undefined' && ChannelDecrypt.getLabels)
+       ? ChannelDecrypt.getLabels() : {};
+     _channelData = decorateAnalyticsChannels(rawChannels, map, labels);
+     if (ranOnce) updateChannelTable();
+   }
+   applyDecorate({});
+   ranOnce = true;
+   buildHashKeyMap().then(applyDecorate).catch(function () { /* graceful */ });

    var timelineHtml = renderChannelTimeline(ch.channelTimeline);
    var topSendersHtml = renderTopSenders(ch.topSenders);
@@ -818,7 +1025,7 @@
      '<table class="analytics-table" id="channelsTable">' +
      channelTheadHtml(_channelSortState.col, _channelSortState.dir) +
      '<tbody id="channelsTbody">' +
-     channelTbodyHtml(_channelData, _channelSortState.col, _channelSortState.dir) +
+     channelTbodyHtml(_channelData, _channelSortState.col, _channelSortState.dir, { grouped: true }) +
      '</tbody>' +
      '</table>' +
      '</div>' +
@@ -1732,8 +1939,8 @@

    <div class="subpath-section">
      <h5>⏱️ Timeline</h5>
-     <div>First seen: ${data.firstSeen ? new Date(data.firstSeen).toLocaleString() : '—'}</div>
-     <div>Last seen: ${data.lastSeen ? new Date(data.lastSeen).toLocaleString() : '—'}</div>
+     <div>First seen: ${data.firstSeen ? (typeof formatAbsoluteTimestamp === 'function' ? formatAbsoluteTimestamp(data.firstSeen) : new Date(data.firstSeen).toLocaleString()) : '—'}</div>
+     <div>Last seen: ${data.lastSeen ? (typeof formatAbsoluteTimestamp === 'function' ? formatAbsoluteTimestamp(data.lastSeen) : new Date(data.lastSeen).toLocaleString()) : '—'}</div>
    </div>

    ${data.observers.length ? `
@@ -2025,10 +2232,11 @@
      }
    }

- function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _ngState.animId) { cancelAnimationFrame(_ngState.animId); } _ngState = null; if (_themeRefreshHandler) { window.removeEventListener('theme-refresh', _themeRefreshHandler); _themeRefreshHandler = null; } }
+ function destroy() { _stopRolesRefresh(); _analyticsData = {}; _channelData = null; if (_ngState && _ngState.animId) { cancelAnimationFrame(_ngState.animId); } _ngState = null; if (_themeRefreshHandler) { window.removeEventListener('theme-refresh', _themeRefreshHandler); _themeRefreshHandler = null; } }

  // Expose for testing
  if (typeof window !== 'undefined') {
    window._analyticsDecorateChannels = decorateAnalyticsChannels;
    window._analyticsSortChannels = sortChannels;
    window._analyticsLoadChannelSort = loadChannelSort;
    window._analyticsSaveChannelSort = saveChannelSort;
@@ -2660,7 +2868,7 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
    const name = esc(n.name || n.public_key.slice(0, 12));
    const role = n.role ? `<span class="text-muted" style="font-size:0.82em">${esc(n.role)}</span>` : '';
    const hs = n.hash_size ? ` <span class="text-muted" style="font-size:0.78em;opacity:0.7">${n.hash_size}B hash</span>` : '';
-   const when = n.last_seen ? ` <span class="text-muted" style="font-size:0.8em">${new Date(n.last_seen).toLocaleDateString()}</span>` : '';
+   const when = n.last_seen ? ` <span class="text-muted" style="font-size:0.8em">${(typeof formatAbsoluteTimestamp === 'function') ? formatAbsoluteTimestamp(n.last_seen) : new Date(n.last_seen).toLocaleDateString()}</span>` : '';
    return `<div style="padding:3px 0"><a href="#/nodes/${encodeURIComponent(n.public_key)}" class="analytics-link">${name}</a> ${role}${hs}${when}</div>`;
  }
@@ -3158,7 +3366,7 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
      const t = new Date(d.t);
      const x = sx(t.getTime());
      const y = sy(d.v);
-     const ts = t.toISOString().replace('T', ' ').replace(/\.\d+Z/, ' UTC');
+     const ts = (typeof formatAbsoluteTimestamp === 'function') ? formatAbsoluteTimestamp(d.t) : t.toISOString().replace('T', ' ').replace(/\.\d+Z/, ' UTC');
      const tip = `${label}: ${formatV(d.v)}${unit}\n${ts}`;
      svg += `<circle cx="${x.toFixed(1)}" cy="${y.toFixed(1)}" r="8" fill="transparent" stroke="none" pointer-events="all"><title>${tip}</title></circle>`;
    });
@@ -3172,7 +3380,7 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
      const idx = Math.floor(i * (data.length - 1) / Math.max(xTicks - 1, 1));
      const t = new Date(data[idx].t);
      const x = sx(t.getTime());
-     const label = t.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
+     const label = (typeof formatChartAxisLabel === 'function') ? formatChartAxisLabel(t, true) : t.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
      svg += `<text x="${x.toFixed(1)}" y="${h - 5}" text-anchor="middle" font-size="9" fill="var(--text-muted)">${label}</text>`;
    }
    return svg;
@@ -3567,5 +3775,81 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
    }
  }

  // #1085 — Roles tab (folded in from former /#/roles page).
  // Renders distribution of node roles + per-role clock-skew posture.
  // Auto-refreshes every 60s while the Roles tab is active (matches the
  // behavior of the former standalone roles-page.js).
  async function renderRolesTab(el) {
    el.innerHTML = '<div class="text-center text-muted" style="padding:40px">Loading roles…</div>';
    await _renderRolesTabBody(el);
    // (Re)start the 60s auto-refresh.
    _stopRolesRefresh();
    _rolesRefreshTimer = setInterval(function () {
      // Bail if the user navigated away from the Roles tab.
      if (_currentTab !== 'roles') { _stopRolesRefresh(); return; }
      var cur = document.getElementById('analyticsContent');
      if (!cur) { _stopRolesRefresh(); return; }
      _renderRolesTabBody(cur);
    }, 60000);
  }

  async function _renderRolesTabBody(el) {
    try {
      var data = await api('/analytics/roles', { ttl: CLIENT_TTL.analyticsRF });
      var roles = (data && data.roles) || [];
      var total = (data && data.totalNodes) || 0;
      if (!roles.length) {
        el.innerHTML = '<div class="text-center text-muted" style="padding:40px">No roles to show.</div>';
        return;
      }
      var maxCount = roles.reduce(function (m, r) { return Math.max(m, r.nodeCount || 0); }, 0) || 1;
      var rows = roles.map(function (r) {
        var pct = total > 0 ? ((r.nodeCount / total) * 100).toFixed(1) : '0.0';
        var barW = Math.round((r.nodeCount / maxCount) * 100);
        var sevCells =
          '<span title="OK (skew < 5min)" style="color:var(--color-success,#0a0)">' + (r.okCount || 0) + '</span> / ' +
          '<span title="Warning (5min – 1h)" style="color:var(--color-warning,#e80)">' + (r.warningCount || 0) + '</span> / ' +
          '<span title="Critical (1h – 30d)" style="color:var(--color-error,#c00)">' + (r.criticalCount || 0) + '</span> / ' +
          '<span title="Absurd (> 30d)" style="color:#a0a">' + (r.absurdCount || 0) + '</span> / ' +
          '<span title="No clock (> 365d)" style="color:#888">' + (r.noClockCount || 0) + '</span>';
        return '' +
          '<tr data-role="' + esc(r.role) + '">' +
          '<td>' + _rolesEmoji(r.role) + ' <strong>' + esc(r.role) + '</strong></td>' +
          '<td style="text-align:right">' + r.nodeCount + '</td>' +
          '<td style="text-align:right">' + pct + '%</td>' +
          '<td style="min-width:140px">' +
          '<div style="background:var(--color-surface-2,#eee);height:10px;border-radius:5px;overflow:hidden">' +
          '<div style="background:var(--color-accent,#06c);width:' + barW + '%;height:100%"></div>' +
          '</div>' +
          '</td>' +
          '<td style="text-align:right">' + (r.withSkew || 0) + '</td>' +
          '<td style="text-align:right">' + _rolesFmtSec(r.medianAbsSkewSec || 0) + '</td>' +
          '<td style="text-align:right">' + _rolesFmtSec(r.meanAbsSkewSec || 0) + '</td>' +
          '<td style="white-space:nowrap">' + sevCells + '</td>' +
          '</tr>';
      }).join('');
      el.innerHTML =
        '<p class="text-muted" style="margin:0 0 12px 0">Distribution of node roles across the mesh, with per-role clock-skew posture.</p>' +
        '<div class="roles-summary" style="margin-bottom:12px;color:var(--color-text-muted,#666)">' +
        '<strong>' + total + '</strong> nodes across <strong>' + roles.length + '</strong> roles' +
        '</div>' +
        '<table id="rolesTable" class="data-table analytics-table" style="width:100%">' +
        '<thead><tr>' +
        '<th>Role</th>' +
        '<th style="text-align:right">Count</th>' +
        '<th style="text-align:right">Share</th>' +
        '<th>Distribution</th>' +
        '<th style="text-align:right" title="Nodes with clock-skew samples">w/ Skew</th>' +
        '<th style="text-align:right" title="Median absolute skew">Median |skew|</th>' +
        '<th style="text-align:right" title="Mean absolute skew">Mean |skew|</th>' +
        '<th title="OK / Warning / Critical / Absurd / No-clock">Severity</th>' +
        '</tr></thead>' +
        '<tbody>' + rows + '</tbody>' +
        '</table>';
    } catch (err) {
      el.innerHTML = '<div class="text-center" style="color:var(--status-red);padding:40px">Failed to load roles: ' + esc(String(err.message || err)) + '</div>';
    }
  }
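Aside: the severity buckets the tooltips encode (OK < 5min, Warning 5min to 1h, Critical 1h to 30d, Absurd > 30d, No clock > 365d) amount to a threshold ladder. A Go sketch under those assumptions; classifySkew and the bucket names are illustrative, and the no-clock-first ordering is inferred from the tooltip text, since a skew over 365d also exceeds every smaller cutoff:

package main

import "time"

type skewSeverity string

const (
    skewOK       skewSeverity = "ok"
    skewWarning  skewSeverity = "warning"
    skewCritical skewSeverity = "critical"
    skewAbsurd   skewSeverity = "absurd"
    skewNoClock  skewSeverity = "no-clock"
)

// classifySkew buckets an absolute clock skew using the thresholds shown
// in the Roles tab tooltips, testing the largest cutoff first.
func classifySkew(abs time.Duration) skewSeverity {
    switch {
    case abs > 365*24*time.Hour:
        return skewNoClock
    case abs > 30*24*time.Hour:
        return skewAbsurd
    case abs > time.Hour:
        return skewCritical
    case abs > 5*time.Minute:
        return skewWarning
    default:
        return skewOK
    }
}
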
  registerPage('analytics', { init, destroy });
})();
+536
-11
@@ -309,6 +309,39 @@ function formatTimestampWithTooltip(isoString, mode) {
|
||||
return { text, tooltip, isFuture };
|
||||
}
|
||||
|
||||
// Format a Date for chart axis labels, respecting customizer timestamp settings.
|
||||
// shortForm: true = time only (for intra-day), false = date+time (multi-day).
|
||||
function formatChartAxisLabel(d, shortForm) {
|
||||
if (!(d instanceof Date) || !isFinite(d.getTime())) return '—';
|
||||
var timezone = (typeof getTimestampTimezone === 'function') ? getTimestampTimezone() : 'local';
|
||||
var preset = (typeof getTimestampFormatPreset === 'function') ? getTimestampFormatPreset() : 'iso';
|
||||
var useUtc = timezone === 'utc';
|
||||
|
||||
if (preset === 'locale') {
|
||||
if (shortForm) {
|
||||
var opts = { hour: '2-digit', minute: '2-digit' };
|
||||
if (useUtc) opts.timeZone = 'UTC';
|
||||
return d.toLocaleTimeString([], opts);
|
||||
}
|
||||
var opts2 = { month: 'short', day: 'numeric', hour: '2-digit', minute: '2-digit' };
|
||||
if (useUtc) opts2.timeZone = 'UTC';
|
||||
return d.toLocaleString([], opts2);
|
||||
}
|
||||
|
||||
// ISO-style (iso or iso-seconds)
|
||||
var hour = useUtc ? d.getUTCHours() : d.getHours();
|
||||
var minute = useUtc ? d.getUTCMinutes() : d.getMinutes();
|
||||
var timeStr = pad2(hour) + ':' + pad2(minute);
|
||||
if (preset === 'iso-seconds') {
|
||||
var sec = useUtc ? d.getUTCSeconds() : d.getSeconds();
|
||||
timeStr += ':' + pad2(sec);
|
||||
}
|
||||
if (shortForm) return timeStr;
|
||||
var month = useUtc ? d.getUTCMonth() + 1 : d.getMonth() + 1;
|
||||
var day = useUtc ? d.getUTCDate() : d.getDate();
|
||||
return pad2(month) + '-' + pad2(day) + ' ' + timeStr;
|
||||
}
|
||||
|
||||
function truncate(str, len) {
|
||||
if (!str) return '';
|
||||
return str.length > len ? str.slice(0, len) + '…' : str;
|
||||
@@ -440,16 +473,160 @@ function buildHexLegend(ranges) {
|
let ws = null;
let wsListeners = [];

// --- Brand-logo packet-driven pulse (#1173) ---
// Replaces the legacy live-dot indicator. Class-toggle only (CSS animations); colors come from
// --logo-accent / --logo-accent-hi tokens. Test seam at window.__corescopeLogo.
//
// Cache the prefers-reduced-motion MediaQueryList ONCE at module load (#1177
// Carmack must-fix #2). Calling window.matchMedia on every pulse() allocates
// a new MQL + parses the query string — wasteful at 15Hz. The CSS @media rule
// already handles render-time switching, so we just cache and read .matches.
var _reducedMotionMQL = null;
try {
  if (typeof window !== 'undefined' && typeof window.matchMedia === 'function') {
    _reducedMotionMQL = window.matchMedia('(prefers-reduced-motion: reduce)');
  }
} catch (_) { _reducedMotionMQL = null; }

const Logo = (function () {
  const RATE_GAP_MS = 66; // min 66ms between pulses ≈ 15/sec (≤16 toggles per second).
  const HALF_MS = 80; // each half of a ping ≤80ms.
  const stats = { triggered: 0, dropped: 0 };
  let lastPingTs = 0;
  let flip = 0; // 0 → A→B, 1 → B→A.
  let lastDirection = null; // 'a' or 'b' (source circle).
  let connected = true; // WS state — gates in-flight chained pulses.
  let generation = 0; // bumped on setConnected(false) / visibilitychange to cancel scheduled halves.

  function reducedMotion() {
    return _reducedMotionMQL ? !!_reducedMotionMQL.matches : false;
  }
  function $all(sel) { return Array.prototype.slice.call(document.querySelectorAll(sel)); }
  function clearAll() {
    $all('.brand-logo circle.logo-node-a, .brand-mark-only circle.logo-node-a,' +
         '.brand-logo circle.logo-node-b, .brand-mark-only circle.logo-node-b').forEach((el) => {
      el.classList.remove('logo-pulse-active', 'logo-pulse-blip');
    });
  }
  function pulseChained(srcSel, dstSel) {
    const gen = generation;
    // Source half: ~80ms.
    $all(srcSel).forEach((el) => el.classList.add('logo-pulse-active'));
    setTimeout(() => {
      $all(srcSel).forEach((el) => el.classList.remove('logo-pulse-active'));
      // Destination half: scheduled via rAF then ~80ms.
      // Bail if WS dropped (or another disconnect cycle ran) since this ping started —
      // otherwise a zombie pulse fires on a logo that's already showing the
      // .logo-disconnected sustained state.
      if (gen !== generation || !connected) return;
      requestAnimationFrame(() => {
        if (gen !== generation || !connected) return;
        $all(dstSel).forEach((el) => el.classList.add('logo-pulse-active'));
        setTimeout(() => {
          $all(dstSel).forEach((el) => el.classList.remove('logo-pulse-active'));
        }, HALF_MS);
      });
    }, HALF_MS);
  }
  function pulseBlip(dstSel) {
    // Reduced-motion: single-step opacity blip on destination only.
    $all(dstSel).forEach((el) => el.classList.add('logo-pulse-blip'));
    setTimeout(() => {
      $all(dstSel).forEach((el) => el.classList.remove('logo-pulse-blip'));
    }, 140);
  }
  function pulse(_msg) {
    // Hidden-tab gate (#1177 Carmack must-fix #1): drop the pulse BEFORE
    // mutating lastPingTs and BEFORE scheduling any rAF/setTimeout chain.
    // Background tabs throttle timers, but before this gate they still ran
    // the source-class toggle and queued a chain that fired in a clump on
    // tab focus — wasted work and a visible storm. Returning early here
    // makes the gate cost ~1 property read per WS message.
    if (typeof document !== 'undefined' && document.hidden) {
      stats.dropped++;
      return false;
    }
    if (!connected) { stats.dropped++; return false; }
    const now = (typeof performance !== 'undefined' && performance.now) ? performance.now() : Date.now();
    if (now - lastPingTs < RATE_GAP_MS) { stats.dropped++; return false; }
    lastPingTs = now;
    stats.triggered++;
    const aToB = (flip === 0);
    flip ^= 1;
    lastDirection = aToB ? 'a' : 'b';
    const srcSel = aToB ? '.brand-logo circle.logo-node-a, .brand-mark-only circle.logo-node-a'
                        : '.brand-logo circle.logo-node-b, .brand-mark-only circle.logo-node-b';
    const dstSel = aToB ? '.brand-logo circle.logo-node-b, .brand-mark-only circle.logo-node-b'
                        : '.brand-logo circle.logo-node-a, .brand-mark-only circle.logo-node-a';
    if (reducedMotion()) {
      pulseBlip(dstSel);
    } else {
      pulseChained(srcSel, dstSel);
    }
    return true;
  }
  function setConnected(isConnected) {
    connected = !!isConnected;
    // Bump generation so any in-flight chained-pulse callbacks bail before
    // toggling classes on the destination circle (otherwise a zombie pulse
    // briefly fights the .logo-disconnected sustained desaturate state).
    generation++;
    $all('.brand-logo, .brand-mark-only').forEach((el) => {
      if (connected) el.classList.remove('logo-disconnected');
      else el.classList.add('logo-disconnected');
    });
    // #1174 mesh-op review: mirror connected state onto the bottom-nav so
    // the 2px top-border indicator (see bottom-nav.css) goes red on
    // disconnect. Mesh-alive is otherwise invisible at ≤768 because
    // .nav-stats is hidden at that breakpoint.
    var bn = document.querySelector('[data-bottom-nav]');
    if (bn) {
      if (connected) bn.classList.remove('disconnected');
      else bn.classList.add('disconnected');
    }
    if (!connected) clearAll();
  }
  // Expose hook for E2E + customizer/devtools introspection.
  // Frozen so consumers can't replace .pulse / .setConnected from outside
  // (the seam is read-only — invocation only).
  const api = Object.freeze({
    pulse: pulse,
    setConnected: setConnected,
    get lastDirection() { return lastDirection; },
    get stats() { return { triggered: stats.triggered, dropped: stats.dropped }; },
  });
  try { window.__corescopeLogo = api; } catch (_) {}

  // Visibility gate (#1177 Carmack must-fix #1): when the tab becomes
  // hidden, bump generation so any in-flight chained pulse halves bail
  // out before they paint, and clear any active pulse classes. The
  // pulse() entry already early-returns on document.hidden — this handles
  // pulses already mid-flight at the moment the tab is backgrounded.
  try {
    if (typeof document !== 'undefined' && typeof document.addEventListener === 'function') {
      document.addEventListener('visibilitychange', function () {
        if (document.hidden) {
          generation++;
          clearAll();
        }
      });
    }
  } catch (_) {}

  return api;
})();
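
// A minimal sketch of driving the frozen seam above from devtools or an
// E2E test; not part of app.js. It uses only what the Object.freeze()
// literal exposes, and the empty message object is a placeholder
// (pulse() never reads its argument):
//
//   const logo = window.__corescopeLogo;
//   logo.setConnected(true);     // restore connected styling
//   logo.pulse({});              // → true, or false when rate-gated,
//                                //   hidden-tab-gated, or disconnected
//   logo.lastDirection;          // 'a' or 'b': source circle of last pulse
//   logo.stats;                  // { triggered, dropped } snapshot (a copy,
//                                //   so mutating it can't skew the counters)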

function connectWS() {
  const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
  ws = new WebSocket(`${proto}//${location.host}`);
  ws.onopen = () => {
    document.getElementById('liveDot')?.classList.add('connected');
    Logo.setConnected(true);
  };
  ws.onclose = () => {
    document.getElementById('liveDot')?.classList.remove('connected');
    Logo.setConnected(false);
    setTimeout(connectWS, 3000);
  };
  ws.onerror = () => ws.close();
  ws.onmessage = (e) => {
    Logo.pulse(e);
    try {
      const msg = JSON.parse(e.data);
      // Debounce cache invalidation — don't nuke on every packet
@@ -468,6 +645,166 @@ function connectWS() {
function onWS(fn) { wsListeners.push(fn); }
function offWS(fn) { wsListeners = wsListeners.filter(f => f !== fn); }
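
// Hedged usage sketch for the onWS/offWS seam. The dispatch loop that fans
// WS messages out to wsListeners sits inside the elided part of connectWS
// above, so the exact message shape here is an assumption:
//
//   function onPacket(msg) {
//     if (msg && msg.type === 'packet') { /* page-specific render */ }
//   }
//   onWS(onPacket);    // subscribe while the page is mounted
//   offWS(onPacket);   // unsubscribe on unmount (filter() drops the fn)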

// --- Pull-to-reconnect (#1063) ---
// Touch-device pull-down at scrollTop=0 reconnects the WebSocket
// (instead of triggering native pull-to-refresh full-page reload).
// Visual indicator pulses during pull; toast confirms result.
const PULL_THRESHOLD_PX = 140;
let _pullToast = null;
let _pullToastTimer = null;
let _pullIndicator = null;

function _ensurePullIndicator() {
  if (_pullIndicator && document.body && typeof document.body.contains === 'function' && document.body.contains(_pullIndicator)) return _pullIndicator;
  const el = document.createElement('div');
  el.id = 'pullReconnectIndicator';
  el.setAttribute('aria-hidden', 'true');
  el.innerHTML = '<span class="prr-icon">⟳</span>';
  el.style.cssText = [
    'position:fixed', 'top:0', 'left:50%', 'transform:translate(-50%,-100%)',
    'z-index:99999', 'padding:8px 14px', 'border-radius:0 0 12px 12px',
    'background:var(--accent,#2563eb)', 'color:#fff', 'font:14px/1 var(--font,system-ui)',
    'box-shadow:0 2px 8px rgba(0,0,0,.2)', 'pointer-events:none',
    'transition:transform .15s ease, opacity .15s ease', 'opacity:0',
  ].join(';');
  document.body.appendChild(el);
  _pullIndicator = el;
  return el;
}

function _showPullToast(msg, ok) {
  try {
    if (_pullToast && _pullToast.remove) _pullToast.remove();
  } catch (e) {}
  if (_pullToastTimer) { try { clearTimeout(_pullToastTimer); } catch (e) {} _pullToastTimer = null; }
  const el = document.createElement('div');
  el.className = 'pull-reconnect-toast';
  el.textContent = msg;
  el.style.cssText = [
    'position:fixed', 'top:12px', 'left:50%', 'transform:translateX(-50%)',
    'z-index:99999', 'padding:8px 16px', 'border-radius:8px',
    'background:' + (ok ? 'var(--status-green,#16a34a)' : 'var(--status-red,#dc2626)'),
    'color:#fff', 'font:14px/1.2 var(--font,system-ui)',
    'box-shadow:0 2px 8px rgba(0,0,0,.2)', 'pointer-events:none',
  ].join(';');
  document.body.appendChild(el);
  _pullToast = el;
  _pullToastTimer = setTimeout(function () {
    _pullToastTimer = null;
    try { el.remove(); } catch (e) {}
  }, 1800);
}

function pullReconnect() {
  // If WS is connected (readyState OPEN), give a brief "Connected ✓"
  // confirmation but still cycle so the user sees fresh data.
  const wasOpen = ws && ws.readyState === 1;
  if (wasOpen) {
    _showPullToast('Connected ✓', true);
    // Fast cycle: close and let the onclose handler schedule the reconnect.
    try { ws.close(); } catch (e) {}
  } else {
    _showPullToast('Reconnecting…', true);
    try { if (ws) ws.close(); } catch (e) {}
    // The onclose handler schedules a reconnect; force one now in case ws was null.
    try { connectWS(); } catch (e) {}
  }
}

function _isTouchDevice() {
  try {
    return ('ontouchstart' in window) ||
      (navigator && (navigator.maxTouchPoints > 0 || navigator.msMaxTouchPoints > 0));
  } catch (e) { return false; }
}

function setupPullToReconnect() {
  // Always attach listeners (tests + future-proof). Inside the handler we
  // gate on _isTouchDevice() AND scrollTop=0 so desktop/scrolled pages are
  // unaffected.
  let startY = null;
  let pulling = false;
  let dist = 0;

  function getScrollTop() {
    return (document.documentElement && document.documentElement.scrollTop) ||
      (document.body && document.body.scrollTop) || 0;
  }

  function onStart(e) {
    if (!_isTouchDevice()) return;
    // Strict scrollTop === 0: ignore any negative overscroll, ignore any scrolled state.
    if (getScrollTop() !== 0) { startY = null; pulling = false; return; }
    const t = e.touches && e.touches[0];
    startY = t ? t.clientY : null;
    pulling = false;
    dist = 0;
  }

  function onMove(e) {
    if (startY == null) return;
    // Cancel the gesture if scrollTop leaves 0 (page scrolled mid-pull).
    if (getScrollTop() !== 0) { startY = null; pulling = false; dist = 0; return; }
    const t = e.touches && e.touches[0];
    if (!t) return;
    const dy = t.clientY - startY;
    if (dy <= 0) {
      // Upward swipe / retract. If we were past the commit threshold and the
      // user retracts back, cancel the gesture so a subsequent touchend does
      // NOT fire reconnect.
      if (pulling) {
        pulling = false;
        dist = 0;
        if (_pullIndicator) {
          _pullIndicator.style.opacity = '0';
          _pullIndicator.style.transform = 'translate(-50%, -100%)';
        }
      }
      return;
    }
    dist = dy;
    if (dy > 8) {
      pulling = true;
      const ind = _ensurePullIndicator();
      const pct = Math.min(1, dy / PULL_THRESHOLD_PX);
      ind.style.opacity = String(pct);
      ind.style.transform = 'translate(-50%, ' + (-100 + pct * 100) + '%)';
      const icon = ind.querySelector && ind.querySelector('.prr-icon');
      if (icon) icon.style.transform = 'rotate(' + Math.round(pct * 360) + 'deg)';
      // Only block native pull-to-refresh once we've crossed the commit
      // threshold — below that, let the browser handle natural scroll/bounce.
      if (dy >= PULL_THRESHOLD_PX && typeof e.preventDefault === 'function' && e.cancelable !== false) {
        try { e.preventDefault(); } catch (_) {}
      }
    }
  }

  function onEnd() {
    const wasPulling = pulling;
    const finalDist = dist;
    const stillAtTop = getScrollTop() === 0;
    startY = null; pulling = false; dist = 0;
    if (_pullIndicator) {
      _pullIndicator.style.opacity = '0';
      _pullIndicator.style.transform = 'translate(-50%, -100%)';
    }
    // Trigger only if: gesture was active, crossed threshold, and page is still at scrollTop=0.
    if (wasPulling && finalDist >= PULL_THRESHOLD_PX && stillAtTop) {
      try { (window.pullReconnect || pullReconnect)(); } catch (e) {}
    }
  }

  document.addEventListener('touchstart', onStart, { passive: true });
  document.addEventListener('touchmove', onMove, { passive: false });
  document.addEventListener('touchend', onEnd, { passive: true });
  document.addEventListener('touchcancel', onEnd, { passive: true });
}
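
// Worked example of the gesture math above (numbers only, no new logic):
// with PULL_THRESHOLD_PX = 140, a 70px pull gives pct = 0.5, so the
// indicator sits at translate(-50%, -50%) (half revealed) with the icon
// rotated 180°, and native pull-to-refresh is still allowed. At 140px,
// pct = 1: the indicator is fully shown, preventDefault() starts firing,
// and a touchend at scrollTop=0 triggers pullReconnect().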

window.pullReconnect = pullReconnect;
window.setupPullToReconnect = setupPullToReconnect;
window.connectWS = connectWS;

/* Global escapeHtml — used by multiple pages */
function escapeHtml(s) {
  if (s == null) return '';
@@ -546,6 +883,14 @@ function navigate() {
    return;
  }

  // Backward-compat redirect: #/roles → #/analytics?tab=roles (issue #1085).
  // The Roles page was folded into the Analytics tab strip; old links and
  // bookmarks must keep working.
  if (location.hash === '#/roles' || location.hash.startsWith('#/roles?') || location.hash.startsWith('#/roles/')) {
    location.hash = '#/analytics?tab=roles';
    return;
  }

  const hash = location.hash.replace('#/', '') || 'packets';
  const route = hash.split('?')[0];

@@ -643,6 +988,7 @@ window.addEventListener('timestamp-mode-changed', () => {
});
window.addEventListener('DOMContentLoaded', () => {
  connectWS();
  setupPullToReconnect();

  // --- Dark Mode ---
  const darkToggle = document.getElementById('darkModeToggle');
@@ -715,18 +1061,197 @@ window.addEventListener('DOMContentLoaded', () => {
  link.addEventListener('click', closeNav);
});

// --- "More" dropdown — JS-driven Priority+ (Issue #1102) ---
const navMoreBtn = document.getElementById('navMoreBtn');
const navMoreMenu = document.getElementById('navMoreMenu');
if (navMoreBtn && navMoreMenu) {
  const navMoreWrap = document.querySelector('.nav-more-wrap');
  const navTop = document.querySelector('.top-nav');
  const navLeft = document.querySelector('.nav-left');
  const navRightEl = document.querySelector('.nav-right');
  const linksContainer = document.querySelector('.nav-links');
  // Belt-and-braces null guards (#1105 MINOR 4): the outer block measures
  // and mutates all of these; if any are missing the layout math throws
  // before we can fall back gracefully.
  if (navMoreBtn && navMoreMenu && navMoreWrap && navLeft && navRightEl && linksContainer && navTop) {
    // Measure available room and decide which links overflow.
    // Algorithm: try to fit all links inline. If the link strip doesn't
    // fit alongside .nav-right + .nav-brand, hide non-priority links one
    // at a time (right-to-left, lowest priority first) until it does.
    // Then mirror the hidden links into the "More ▾" menu so nothing
    // disappears from the user's reach.
    const allLinks = Array.from(linksContainer.querySelectorAll('.nav-link'));
    // overflowQueue (#1105 MINOR 6): the order links are removed from the
    // inline strip when space runs out. Built right-to-left from
    // non-priority links (lowest priority dropped first) and then high-
    // priority links as a last-resort tail; see the worked example below.
    // `data-priority="high"` is the only signal — if you ever need finer
    // ordering, switch to a numeric attribute (e.g. data-overflow-order="3")
    // rather than re-shuffling index in HTML.
    const overflowQueue = allLinks.filter(a => a.dataset.priority !== 'high')
      .reverse() // right-to-left
      .concat(allLinks.filter(a => a.dataset.priority === 'high').reverse());
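
    // Worked example of the queue order (link set hypothetical): an inline
    // order of [Home*, Packets*, Live*, Map*, Channels*, Nodes, Tools, Lab]
    // with * = data-priority="high" yields
    // overflowQueue = [Lab, Tools, Nodes, Channels*, Map*, Live*, Packets*, Home*];
    // Lab is the first link hidden, Home the very last resort.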

    function rebuildMoreMenu() {
      navMoreMenu.innerHTML = '';
      const hidden = allLinks.filter(a => a.classList.contains('is-overflow'));
      hidden.forEach(function(link) {
        var clone = link.cloneNode(true);
        // The clone is in the overflow menu, not the inline strip.
        clone.classList.remove('is-overflow');
        clone.setAttribute('role', 'menuitem');
        // cloneNode(true) preserves DOM but NOT event listeners. The
        // originals get `closeNav` attached up above (#1105 MINOR 5);
        // mirror that here so a click on the More-menu clone behaves
        // identically to a click on the inline link (closes the
        // hamburger panel + dismisses the More menu).
        clone.addEventListener('click', closeNav);
        clone.addEventListener('click', closeMoreMenu);
        navMoreMenu.appendChild(clone);
      });
      // If nothing overflows, hide the More button entirely so wide
      // viewports don't show a useless dropdown trigger.
      navMoreWrap.classList.toggle('is-hidden', hidden.length === 0);
      // Refresh active state on the More button (a hidden active link
      // means the More menu currently "is" the active section).
      var hasActiveMore = navMoreMenu.querySelector('.nav-link.active');
      navMoreBtn.classList.toggle('active', !!hasActiveMore);
    }

    // #1105 MINOR 1: cached intrinsic width of the More button. Captured
    // the first time `fits()` sees navMoreWrap rendered (display:flex).
    // Falls back to MORE_BTN_RESERVE_PX (a conservative initial guess
    // sized for "More ▾" at default font/padding) until that happens.
    var cachedMoreW = 0;
    var MORE_BTN_RESERVE_PX = 70;

    function applyNavPriority() {
      // Skip on mobile (<768px) — hamburger CSS owns that layout.
      if (window.innerWidth < 768) {
        allLinks.forEach(a => a.classList.remove('is-overflow'));
        navMoreWrap.classList.add('is-hidden');
        return;
      }
      // Reset: show everything, then hide as needed.
      allLinks.forEach(a => a.classList.remove('is-overflow'));
      navMoreWrap.classList.remove('is-hidden');
      // #1106: in the 768-1100px narrow-desktop band the CSS already
      // hides .nav-stats and tightens .nav-link padding (see the
      // "Nav narrow-desktop tightening" media query in style.css).
      // The design intent of that band is "show exactly the 5 high-
      // priority links + More". Pure measurement says everything fits
      // (~981px needed in a 1080px viewport once nav-stats is gone),
      // but the design contract — locked by test-nav-priority-1102-
      // e2e.js #1105 MINOR 7 — is exact identity, not "fits". Force-
      // collapse all non-high-priority links inside this band so the
      // overflow menu is non-empty and the high-priority set is the
      // only thing inline. Above 1100px the measurement loop below
      // owns the decision (and at 2560px nothing overflows).
      if (window.innerWidth <= 1100) {
        allLinks.forEach(a => {
          if (a.dataset.priority !== 'high') a.classList.add('is-overflow');
        });
        rebuildMoreMenu();
        return;
      }
      // Iteratively hide low-priority links until the link strip fits.
      // .top-nav has overflow:hidden and .nav-left has flex-shrink:1, so
      // an overflowing strip silently clips rather than pushing
      // nav-right out — bounding-rect math on .nav-left lies. Instead
      // measure the *intrinsic* widths of the parts (independent of
      // current clipping) and compare to the viewport. SAFETY absorbs
      // the .top-nav side padding + nav-right inner gaps + sub-pixel
      // rounding (the historic #1055 bug was a 6–20px overlap).
      //
      // #1105 MINOR 3: at the 1101px media-query flip `.nav-stats`
      // toggles from display:none → flex (and vice-versa). The resize
      // handler is rAF-debounced and runs *after* the layout flip, so
      // navRightEl.scrollWidth measured here reflects the post-flip
      // intrinsic width — not stale pre-flip width.
      const navBrand = document.querySelector('.nav-brand');
      const SAFETY = 32;
      // #1105 MINOR 1+2: read both gap values from CSS rather than a
      // shared `GUTTER = 24` constant. Today `.nav-left` (gap between
      // brand/links/more/right cells) and `.nav-links` (gap between
      // individual link items) both resolve to --space-lg = 24px, but
      // they're conceptually distinct gaps. If --space-lg or .nav-left's
      // gap diverges in the future, the fit math must follow.
      const navLeftGap = parseFloat(getComputedStyle(navLeft).columnGap ||
        getComputedStyle(navLeft).gap || '0') || 0;
      // #1105 MINOR 1: compute the More-button reserve from its actual
      // rendered width on first measure, instead of a hard-coded 70px
      // fallback. Cached so we don't re-measure (offsetWidth is 0 when
      // display:none; we capture the value the first time it's visible).
      function fits() {
        const visibleLinks = allLinks.filter(a => !a.classList.contains('is-overflow'));
        let linkW = 0;
        visibleLinks.forEach(a => { linkW += a.getBoundingClientRect().width; });
        const linkGapPx = parseFloat(getComputedStyle(linksContainer).columnGap ||
          getComputedStyle(linksContainer).gap || '0') || 0;
        const linksGap = Math.max(0, visibleLinks.length - 1) * linkGapPx;
        const brandW = navBrand ? navBrand.getBoundingClientRect().width : 0;
        // Always reserve space for the More button if anything could
        // overflow. Measure the live width when visible and cache it
        // for use when the button is currently hidden (display:none →
        // getBoundingClientRect() returns 0). MORE_BTN_RESERVE_PX is
        // the conservative initial fallback used until we get a real
        // measurement.
        const moreVis = !navMoreWrap.classList.contains('is-hidden');
        const liveMoreW = moreVis ? navMoreWrap.getBoundingClientRect().width : 0;
        if (liveMoreW > 0) cachedMoreW = liveMoreW;
        const moreW = liveMoreW > 0 ? liveMoreW
          : (cachedMoreW > 0 ? cachedMoreW : MORE_BTN_RESERVE_PX);
        const rightW = navRightEl.scrollWidth; // intrinsic, ignores clipping
        const needed = brandW + navLeftGap + linkW + linksGap + navLeftGap + moreW + navLeftGap + rightW + SAFETY;
        return needed <= window.innerWidth;
      }
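
      // Worked example of the fit math (widths hypothetical): brand 180 +
      // gap 24 + links 520 + linksGap 96 + gap 24 + more 70 + gap 24 +
      // right 240 + SAFETY 32 = 1210px needed, so fits() is false in a
      // 1200px viewport (overflow one link and re-check) and true at 1280px.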
      let i = 0;
      while (!fits() && i < overflowQueue.length) {
        overflowQueue[i].classList.add('is-overflow');
        i++;
      }
      // #1139 Bug B: floor the More menu at >=2 items. The greedy
      // fits() loop above is happy to stop after pushing exactly ONE
      // link into overflow (commonly "🎵 Lab" at ~1600px viewports),
      // producing a degenerate single-item dropdown. If exactly one
      // link overflowed, promote one more from the queue so the user
      // sees a useful menu instead of a one-item fragment. Skip when
      // nothing overflowed (everything fits inline → More is hidden,
      // which is the correct UX) and skip when the queue is exhausted.
      var overflowedCount = allLinks.filter(a => a.classList.contains('is-overflow')).length;
      if (overflowedCount === 1) {
        if (i < overflowQueue.length) {
          overflowQueue[i].classList.add('is-overflow');
          i++;
        } else {
          // Defensive: queue exhausted with exactly 1 overflowed link
          // means we cannot satisfy the >=2 floor (only one promotable
          // link existed). Surface it loudly instead of silently
          // shipping the degenerate single-item dropdown the floor
          // was added to prevent.
          console.warn('[nav] More menu floor: overflowQueue exhausted with 1 item; cannot enforce >=2 floor');
        }
      }
      rebuildMoreMenu();
    }

    // Run once on load, again after fonts settle (label widths shift),
    // and on resize (debounced via rAF).
    applyNavPriority();
    if (document.fonts && document.fonts.ready) {
      document.fonts.ready.then(applyNavPriority);
    }
    let rafId = 0;
    window.addEventListener('resize', function() {
      if (rafId) cancelAnimationFrame(rafId);
      rafId = requestAnimationFrame(applyNavPriority);
    });
    // Re-apply on route change too: the active link gets bigger padding
    // (background pill), so which links fit can shift between pages.
    window.addEventListener('hashchange', function() {
      // Defer so the route handler's class toggles run first.
      requestAnimationFrame(applyNavPriority);
    });

    navMoreBtn.addEventListener('click', (e) => {
      e.stopPropagation();
      const opening = !navMoreMenu.classList.contains('open');

@@ -0,0 +1,275 @@
/* Issue #1061 — Bottom navigation styles.
 *
 * Activates at viewports ≤768px. Uses position:fixed so it does not
 * trigger layout reflow on the rest of the page, plus
 * env(safe-area-inset-bottom) padding so the iOS home-indicator does
 * not overlap the tabs. The matching <meta viewport-fit=cover> already
 * exists in index.html (verified pre-implementation).
 *
 * Tokens reused (defined in BOTH :root and dark @media in style.css):
 *   --nav-bg, --nav-text, --nav-text-muted, --nav-active-bg, --accent,
 *   --border, --space-sm.
 *
 * Decision: media query (not container query). The rest of the codebase
 * uses @media exclusively (no @container rules in style.css today), so
 * a media query keeps things consistent.
 *
 * Decision: top-nav suppression = display:none at ≤768px. Spec
 * forbids duplicate nav UX; the bottom nav covers the 5 high-priority
 * routes; long-tail routes (Tools/Lab/Perf/Analytics/etc.) remain
 * reachable by URL. A "More" tab or hamburger fallback is deferred per
 * the issue body's explicit guidance.
 */

/* #1174 mesh-op review: --bottom-nav-reserve is the contract page-level
 * full-viewport rules use to subtract the bottom-nav's height from
 * 100dvh. 0px at desktop (no nav reserved); 56px + safe-area at ≤768px.
 * Pages opt-in by referencing it (see public/live.css for /live, and
 * #app.app-fixed in style.css for the SPA fixed-page container). */
:root {
  --bottom-nav-reserve: 0px;
}
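
/* Hedged opt-in sketch: .my-fixed-page is hypothetical; the real
 * consumers are .live-page (live.css) and #app.app-fixed (style.css).
 *
 *   .my-fixed-page {
 *     height: calc(100dvh - var(--bottom-nav-reserve));
 *   }
 *
 * At desktop the token is 0px, so the rule collapses to plain 100dvh. */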

/* Default: hidden on wide viewports. The bottom-nav element exists in
 * the DOM at all widths (build runs at DOMContentLoaded) but is only
 * rendered to the user at ≤768px. */
.bottom-nav {
  display: none;
}

@media (max-width: 768px) {
  /* #1174 mesh-op review: set the reserve token at the breakpoint so
   * page-level full-viewport rules (e.g. .live-page, #app.app-fixed)
   * automatically subtract the bottom-nav height. */
  :root {
    --bottom-nav-reserve: calc(56px + env(safe-area-inset-bottom, 0px));
  }

  .bottom-nav {
    display: flex;
    position: fixed;
    left: 0;
    right: 0;
    bottom: 0;
    z-index: 1200; /* above nav-links dropdown (1100) */
    background: var(--nav-bg);
    border-top: 1px solid var(--border);
    box-shadow: 0 -4px 16px rgba(0, 0, 0, 0.25);
    /* env() falls back to 0 outside iOS notch devices; the explicit 0px
     * fallback keeps the declaration valid in engines without env(). */
    padding-bottom: env(safe-area-inset-bottom, 0px);
    padding-top: 0;
    /* Distribute the tabs evenly. */
    justify-content: space-around;
    align-items: stretch;
    /* No transform — it would create a stacking context that traps any
     * fixed-position descendants (we have none, but cheap insurance). */
  }

  /* Suppress the inline link bar and right-side cluster — but KEEP
   * .nav-brand (logo identity). #1174: also hide #hamburger at narrow
   * widths — the new "More" tab in the bottom-nav now surfaces the
   * long-tail routes, so the hamburger is redundant on phones. */
  .top-nav .nav-links,
  .top-nav .nav-more-wrap,
  .top-nav .nav-right,
  .top-nav .nav-stats {
    display: none !important;
  }
  /* #1174: hamburger hidden at ≤768px (replaced by the More tab). */
  #hamburger {
    display: none !important;
  }
  /* Brand sits alone on the left at narrow widths. */
  .top-nav {
    justify-content: space-between;
  }

  /* Reserve space at page bottom so the fixed-positioned bottom-nav does
   * not cover the last row of content: 56px tab height + safe-area inset. */
  body {
    padding-bottom: calc(56px + env(safe-area-inset-bottom, 0px));
  }
}

/* Tab — anchor element. Each tab is a column with icon over label, sized
 * to ≥48px tall (the Apple/Google touch-target floor confirmed by
 * issue #1060). */
.bottom-nav-tab {
  flex: 1 1 0;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  gap: 2px;
  /* 56px is a comfortable Material/iOS bottom-bar height; it also clears
   * the 48px a11y floor by 8px, so labels render without clipping. */
  min-height: 56px;
  padding: 6px 4px;
  color: var(--nav-text-muted);
  text-decoration: none;
  font-size: 11px;
  line-height: 1.1;
  border-top: 2px solid transparent;
  /* Reset <button> defaults — the More tab is a <button>; its native
   * background/border/font would otherwise clash with the <a> tabs. */
  border-left: 0;
  border-right: 0;
  border-bottom: 0;
  background: transparent;
  font-family: inherit;
  cursor: pointer;
  /* touch-action: manipulation prevents the iOS double-tap zoom delay
   * on tabs. */
  touch-action: manipulation;
  transition: color 120ms ease, background-color 120ms ease, border-color 120ms ease;
}

.bottom-nav-tab:hover,
.bottom-nav-tab:focus-visible {
  color: var(--nav-text);
  outline: none;
}

.bottom-nav-tab:focus-visible {
  /* Keyboard a11y — visible focus ring inside the bar. */
  outline: 2px solid var(--accent);
  outline-offset: -2px;
}

.bottom-nav-tab.active {
  color: var(--nav-text);
  background: var(--nav-active-bg);
  border-top-color: var(--accent);
}

.bottom-nav-icon {
  font-size: 20px;
  line-height: 1;
  display: block;
}

.bottom-nav-label {
  font-weight: 600;
  letter-spacing: 0.01em;
  white-space: nowrap;
}

/* Respect reduced-motion preferences — disable the color/border
 * transition. The app already has a global reduced-motion block in
 * style.css; this is the bottom-nav-specific override. */
@media (prefers-reduced-motion: reduce) {
  .bottom-nav-tab {
    transition: none;
  }
}

/* ─── #1174: More sheet ───
 * Bottom-anchored popover that surfaces the long-tail routes (Nodes,
 * Tools, Observers, Analytics, Perf, Audio Lab). Anchored ABOVE the
 * bottom-nav (bottom: 56px + safe-area), z-index between the nav and
 * any modal layer.
 */
.bottom-nav-sheet {
  display: none;
}

@media (max-width: 768px) {
  .bottom-nav-sheet {
    /* The element uses the `hidden` attribute to stay display:none by
     * default; when `hidden` is dropped, the :not([hidden]) rule below
     * renders it as a grid. */
    position: fixed;
    left: 8px;
    right: 8px;
    /* Sit above the 56px tabs + breathing room + safe-area inset. */
    bottom: calc(56px + env(safe-area-inset-bottom, 0px) + 8px);
    z-index: 1250; /* above bottom-nav (1200), below modals if any */
    background: var(--nav-bg);
    border: 1px solid var(--border);
    border-radius: 12px;
    box-shadow: 0 8px 32px rgba(0, 0, 0, 0.35);
    padding: 8px;
    max-height: 60vh;
    overflow-y: auto;
  }
  .bottom-nav-sheet[hidden] {
    display: none !important;
  }
  .bottom-nav-sheet:not([hidden]) {
    display: grid;
    grid-template-columns: repeat(2, 1fr);
    gap: 6px;
  }
}

.bottom-nav-sheet-item {
  display: flex;
  align-items: center;
  gap: 10px;
  min-height: 48px;
  padding: 10px 12px;
  border-radius: 8px;
  color: var(--nav-text);
  text-decoration: none;
  font-size: 14px;
  font-weight: 600;
  background: transparent;
  border: 1px solid transparent;
  touch-action: manipulation;
  transition: background-color 120ms ease, border-color 120ms ease;
}

.bottom-nav-sheet-item:hover,
.bottom-nav-sheet-item:focus-visible {
  background: var(--nav-active-bg);
  outline: none;
}

.bottom-nav-sheet-item:focus-visible {
  outline: 2px solid var(--accent);
  outline-offset: -2px;
}

.bottom-nav-sheet-icon {
  font-size: 18px;
  line-height: 1;
}

.bottom-nav-sheet-label {
  white-space: nowrap;
}

@media (prefers-reduced-motion: reduce) {
  .bottom-nav-sheet-item {
    transition: none;
  }
}

/* ─── #1174 mesh-op review: bottom-nav mesh-alive indicator ───
 * .nav-stats (top-nav mesh-alive pulse) is hidden at ≤768. Add a thin
 * 2px top border to the bottom-nav that mirrors the brand-logo's
 * connected/disconnected state via a class toggled from app.js
 * (window.__corescopeLogo.setConnected). Cheap, peripheral-vision
 * visible, no per-tab clutter.
 *
 * Default (connected): accent-tinted border. Disconnected: red.
 * The base bottom-nav rule already declares border-top: 1px solid
 * var(--border) — we override its color with a slightly heavier
 * 2px stripe so the connectivity color is the dominant visual.
 */
@media (max-width: 768px) {
  .bottom-nav {
    border-top: 2px solid var(--accent);
    transition: border-top-color 200ms ease;
  }
  .bottom-nav.disconnected {
    border-top-color: var(--danger, #ef4444);
  }
}
@media (prefers-reduced-motion: reduce) {
  .bottom-nav {
    transition: none;
  }
}
@@ -0,0 +1,323 @@
/* Issue #1061 — Bottom navigation for narrow viewports.
 * Issue #1174 — Add 6th "More" tab + bottom-anchored sheet for long-tail routes.
 *
 * Renders 6 tabs anchored to the bottom on viewports ≤768px:
 *   1. Home     — primary
 *   2. Packets  — primary
 *   3. Live     — primary
 *   4. Map      — primary
 *   5. Channels — primary
 *   6. More     — toggles a bottom-anchored sheet listing the long-tail
 *      routes (Nodes, Tools, Observers, Analytics, Perf, Audio Lab).
 *      Replaces the hamburger at ≤768px (#1174 design call).
 *
 * Tabs are <a href="#/..."> so they reuse the existing hashchange-driven
 * router in app.js (no full reload, no reimplementation of routing logic).
 * The "More" tab is a <button> (not <a>) since it toggles UI rather than
 * navigating to a hash.
 *
 * Stable selectors for tests / future automation:
 *   [data-bottom-nav]                      — the <nav> container
 *   [data-bottom-nav-tab="<route>"]        — each tab including "more"
 *   [data-bottom-nav-sheet]                — the popover sheet
 *   [data-bottom-nav-more-route="<route>"] — each long-tail route in the sheet
 *
 * Active-tab highlight is a class toggle ("active") set on hashchange.
 * Visual treatment lives in bottom-nav.css and respects
 * prefers-reduced-motion (transitions disabled).
 *
 * Sheet behavior:
 *   - tap More → sheet opens, aria-expanded="true"
 *   - tap More while open → sheet closes (toggle, not push)
 *   - tap any route inside → in-app router navigates AND sheet closes
 *   - tap outside (anywhere not the sheet or the More tab) → sheet closes
 *   - sheet has role="menu" for a11y
 *
 * The sheet DOM is built lazily on first open — it's only used at ≤768px
 * and there's no point sitting in the DOM at desktop widths.
 */
(function () {
  'use strict';

  if (typeof document === 'undefined') return;

  // 5 primary tabs + the More toggle. Each entry: { route, hash, label, icon }.
  // For More, hash is null (not a route).
  var TABS = [
    { route: 'home',     hash: '#/home',     label: 'Home',     icon: '🏠' },
    { route: 'packets',  hash: '#/packets',  label: 'Packets',  icon: '📦' },
    { route: 'live',     hash: '#/live',     label: 'Live',     icon: '🔴' },
    { route: 'map',      hash: '#/map',      label: 'Map',      icon: '🗺️' },
    { route: 'channels', hash: '#/channels', label: 'Channels', icon: '💬' },
    { route: 'more',     hash: null,         label: 'More',     icon: '☰' },
  ];

  // Long-tail routes surfaced in the More sheet. Mirrors data-route values
  // from the existing top-nav (public/index.html). Order matches what
  // operators expect from the desktop top-nav.
  //
  // ⚠️ MANUAL SYNC REQUIRED ⚠️
  // This list is intentionally hardcoded (not generated from
  // `.top-nav .nav-link[data-route]`) because the top-nav HTML is in
  // mid-rewrite and not a reliable single-source-of-truth. If you add a
  // new top-nav route (e.g. a future "Lab" page), you MUST also append
  // it here, or it will be unreachable on phones at ≤768px (the
  // hamburger is hidden at that breakpoint — see bottom-nav.css).
  var MORE_ROUTES = [
    { route: 'nodes',     hash: '#/nodes',     label: 'Nodes',     icon: '🖥️' },
    { route: 'tools',     hash: '#/tools',     label: 'Tools',     icon: '🛠️' },
    { route: 'observers', hash: '#/observers', label: 'Observers', icon: '👁️' },
    { route: 'analytics', hash: '#/analytics', label: 'Analytics', icon: '📊' },
    { route: 'perf',      hash: '#/perf',      label: 'Perf',      icon: '⚡' },
    { route: 'audio-lab', hash: '#/audio-lab', label: 'Audio Lab', icon: '🎵' },
  ];

  var SHEET_ID = 'bottomNavMoreSheet';

  function currentRoute() {
    // Mirror app.js navigate(): strip "#/" and any trailing "?…" / "/…".
    var h = (location.hash || '').replace(/^#\//, '');
    if (!h) return 'packets'; // app.js default
    var slash = h.indexOf('/');
    if (slash >= 0) h = h.substring(0, slash);
    var q = h.indexOf('?');
    if (q >= 0) h = h.substring(0, q);
    return h || 'packets';
  }
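
  // Worked examples for currentRoute() (hash values illustrative):
  //   '#/analytics?tab=roles' → 'analytics'
  //   '#/nodes/ab12'          → 'nodes'
  //   '' (no hash)            → 'packets'
  // Matching app.js keeps the lit tab and the rendered page in agreement.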

  function build() {
    if (document.querySelector('[data-bottom-nav]')) return;

    var nav = document.createElement('nav');
    nav.className = 'bottom-nav';
    nav.setAttribute('data-bottom-nav', '');
    nav.setAttribute('role', 'navigation');
    nav.setAttribute('aria-label', 'Bottom navigation');

    TABS.forEach(function (t) {
      var el;
      if (t.route === 'more') {
        // <button> for the toggle: it does not navigate.
        el = document.createElement('button');
        el.setAttribute('type', 'button');
        el.setAttribute('aria-haspopup', 'menu');
        el.setAttribute('aria-expanded', 'false');
        el.setAttribute('aria-controls', SHEET_ID);
      } else {
        el = document.createElement('a');
        el.setAttribute('href', t.hash);
      }
      el.className = 'bottom-nav-tab';
      el.setAttribute('data-bottom-nav-tab', t.route);
      el.setAttribute('data-route', t.route);
      el.setAttribute('aria-label', t.label);

      var ic = document.createElement('span');
      ic.className = 'bottom-nav-icon';
      ic.setAttribute('aria-hidden', 'true');
      ic.textContent = t.icon;

      var lb = document.createElement('span');
      lb.className = 'bottom-nav-label';
      lb.textContent = t.label;

      el.appendChild(ic);
      el.appendChild(lb);
      nav.appendChild(el);
    });

    // Insert after <main> so it's a sibling at the body level — keeps
    // it out of the <main> scroll container. The CSS pins it bottom:0
    // via position:fixed so DOM order beyond "after the nav" doesn't
    // matter for layout, but document order matters for screen readers.
    var main = document.getElementById('app') || document.querySelector('main');
    if (main && main.parentNode) {
      main.parentNode.insertBefore(nav, main.nextSibling);
    } else {
      document.body.appendChild(nav);
    }

    wireMoreSheet();
  }

  function syncActive() {
    var route = currentRoute();
    // #1174 mesh-op review: the More tab represents the long-tail
    // routes; reflect that in the active-class so users on /tools,
    // /analytics, etc. still see WHICH tab they're under. Without this
    // every long-tail route lit up zero tabs.
    var moreRouteSet = {};
    for (var k = 0; k < MORE_ROUTES.length; k++) moreRouteSet[MORE_ROUTES[k].route] = 1;
    var routeIsLongTail = !!moreRouteSet[route];
    var tabs = document.querySelectorAll('[data-bottom-nav-tab]');
    for (var i = 0; i < tabs.length; i++) {
      var t = tabs[i];
      var tabRoute = t.getAttribute('data-bottom-nav-tab');
      if (tabRoute === 'more') {
        // The More tab IS active when the current route belongs to the
        // long-tail set surfaced by the More sheet. We do NOT add
        // aria-current here — the tab toggles a sheet, not a single
        // page, so aria-current="page" would lie. The visual active
        // class is the user-facing affordance; that's enough.
        if (routeIsLongTail) t.classList.add('active');
        else if (!isSheetOpen()) t.classList.remove('active');
        // If the sheet is open we leave .active alone — openSheet()
        // owns the class while open.
        continue;
      }
      if (tabRoute === route) {
        t.classList.add('active');
        t.setAttribute('aria-current', 'page');
      } else {
        t.classList.remove('active');
        t.removeAttribute('aria-current');
      }
    }
  }

  // ── More sheet ──
  // Built lazily on first open; lives as a sibling of the <nav> so the
  // bottom-nav's z-index/stacking is independent of the sheet. The sheet
  // is anchored above the bottom-nav via CSS (bottom: <nav-height>).
  function getOrBuildSheet() {
    var existing = document.getElementById(SHEET_ID);
    if (existing) return existing;

    var sheet = document.createElement('div');
    sheet.id = SHEET_ID;
    sheet.className = 'bottom-nav-sheet';
    sheet.setAttribute('data-bottom-nav-sheet', '');
    sheet.setAttribute('role', 'menu');
    sheet.setAttribute('aria-label', 'More navigation');
    sheet.hidden = true;

    MORE_ROUTES.forEach(function (r) {
      var a = document.createElement('a');
      a.className = 'bottom-nav-sheet-item';
      a.setAttribute('href', r.hash);
      a.setAttribute('role', 'menuitem');
      a.setAttribute('data-bottom-nav-more-route', r.route);
      a.setAttribute('data-route', r.route);

      var ic = document.createElement('span');
      ic.className = 'bottom-nav-sheet-icon';
      ic.setAttribute('aria-hidden', 'true');
      ic.textContent = r.icon;

      var lb = document.createElement('span');
      lb.className = 'bottom-nav-sheet-label';
      lb.textContent = r.label;

      a.appendChild(ic);
      a.appendChild(lb);

      // Tap a route → close sheet (the <a href> handles navigation via
      // the existing hashchange router in app.js).
      a.addEventListener('click', function () { closeSheet(); });

      sheet.appendChild(a);
    });

    // Sit the sheet next to the nav so they share a stacking context.
    var nav = document.querySelector('[data-bottom-nav]');
    if (nav && nav.parentNode) {
      nav.parentNode.insertBefore(sheet, nav);
    } else {
      document.body.appendChild(sheet);
    }
    return sheet;
  }

  function isSheetOpen() {
    var sheet = document.getElementById(SHEET_ID);
    return !!(sheet && !sheet.hidden);
  }

  function openSheet() {
    var sheet = getOrBuildSheet();
    sheet.hidden = false;
    sheet.classList.add('open');
    var moreTab = document.querySelector('[data-bottom-nav-tab="more"]');
    if (moreTab) {
      moreTab.setAttribute('aria-expanded', 'true');
      moreTab.classList.add('active');
    }
  }

  function closeSheet() {
    var sheet = document.getElementById(SHEET_ID);
    if (sheet) {
      sheet.hidden = true;
      sheet.classList.remove('open');
    }
    var moreTab = document.querySelector('[data-bottom-nav-tab="more"]');
    if (moreTab) {
      moreTab.setAttribute('aria-expanded', 'false');
      moreTab.classList.remove('active');
    }
  }

  function toggleSheet() {
    if (isSheetOpen()) closeSheet();
    else openSheet();
  }

  function wireMoreSheet() {
    var moreTab = document.querySelector('[data-bottom-nav-tab="more"]');
    if (!moreTab) return;
    // Toggle on tap. Use click — covers mouse and synthesized tap.
    moreTab.addEventListener('click', function (ev) {
      ev.preventDefault();
      ev.stopPropagation();
      toggleSheet();
    });

    // Outside-click closes the sheet. Listen at document level; ignore
    // clicks on the sheet itself or on the More tab (handled above).
    document.addEventListener('click', function (ev) {
      if (!isSheetOpen()) return;
      var t = ev.target;
      var sheet = document.getElementById(SHEET_ID);
      if (sheet && sheet.contains(t)) return;
      if (moreTab.contains(t)) return;
      closeSheet();
    });

    // Tapping any OTHER bottom-nav tab also closes the sheet.
    var otherTabs = document.querySelectorAll('[data-bottom-nav-tab]');
    for (var i = 0; i < otherTabs.length; i++) {
      var t = otherTabs[i];
      if (t.getAttribute('data-bottom-nav-tab') === 'more') continue;
      t.addEventListener('click', function () { closeSheet(); });
    }

    // Esc closes the sheet (a11y).
    document.addEventListener('keydown', function (ev) {
      if (ev.key === 'Escape' && isSheetOpen()) closeSheet();
    });

    // Hashchange (any nav) also closes — covers programmatic navigation.
    window.addEventListener('hashchange', function () { closeSheet(); });
  }

  function init() {
    // Singleton guard: init() may be invoked twice if (a) DOMContentLoaded
    // fires AND (b) something else re-imports the script later, or if a
    // future SPA-like re-mount path is added. The internal `build()` is
    // idempotent (early-returns on existing [data-bottom-nav]), but the
    // `hashchange` listener and the document-level outside-click /
    // keydown listeners in wireMoreSheet() would otherwise stack, leaking
    // handlers exactly like PR #1180's MQL-leak class. Bail on second call.
    if (window.__bottomNavInitDone) return;
    window.__bottomNavInitDone = true;
    build();
    syncActive();
    window.addEventListener('hashchange', syncActive);
  }

  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', init);
  } else {
    init();
  }
})();
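
// Hedged E2E sketch against the stable selectors documented above: the
// selector strings are the contract, the flow itself is illustrative.
//
//   const more = document.querySelector('[data-bottom-nav-tab="more"]');
//   more.click();   // builds the sheet lazily, opens it, aria-expanded="true"
//   document.querySelector('[data-bottom-nav-sheet]').hidden;   // → false
//   more.click();   // toggle: closes again, aria-expanded="false"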
@@ -24,6 +24,10 @@

var popoverEl = null;
var currentChannel = null;
// #1168 Munger #3: use shared ref-counted scroll-lock helper instead of
// overwriting body.style.overflow directly. Without this, two cooperating
// surfaces (this picker + SlideOver) corrupt overflow last-writer-wins.
var scrollLockToken = null;
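
// Hedged sketch of the helper's assumed shape: packets.js installs the
// real one, so only the acquire()/release() calls used below are
// contractual; the internals here are illustrative.
//
//   window.__scrollLock = (function () {
//     var holders = new Set(), next = 1;
//     return {
//       acquire: function () {
//         var token = next++;
//         holders.add(token);
//         document.body.style.overflow = 'hidden';
//         return token;
//       },
//       release: function (token) {
//         holders.delete(token);
//         // Only the LAST holder releasing restores scrolling.
//         if (holders.size === 0) document.body.style.overflow = '';
//       }
//     };
//   })();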

function createPopover() {
  if (popoverEl) return popoverEl;
@@ -120,14 +124,22 @@
  var ph = rect.height;
  var vw = window.innerWidth;
  var vh = window.innerHeight;
  var finalX = x + pw > vw ? Math.max(0, vw - pw - 14) : x;
  var finalY = y + ph > vh ? Math.max(0, vh - ph - 14) : y;
  el.style.left = finalX + 'px';
  el.style.top = finalY + 'px';
}

// Lock background scroll while popover is open (#1168 Munger #3:
// ref-counted via window.__scrollLock so concurrent modal surfaces
// don't corrupt overflow under last-writer-wins).
if (window.__scrollLock && scrollLockToken == null) {
  scrollLockToken = window.__scrollLock.acquire();
} else if (!window.__scrollLock) {
  // Fallback (shouldn't happen — packets.js installs the helper at
  // load time and is loaded before this picker).
  document.body.style.overflow = 'hidden';
}

// Focus first swatch for keyboard accessibility
var firstSwatch = el.querySelector('.cc-swatch');
@@ -143,7 +155,12 @@
function hidePopover() {
  if (popoverEl) popoverEl.style.display = 'none';
  currentChannel = null;
  if (window.__scrollLock && scrollLockToken != null) {
    window.__scrollLock.release(scrollLockToken);
    scrollLockToken = null;
  } else if (!window.__scrollLock) {
    document.body.style.overflow = '';
  }
  document.removeEventListener('click', onOutsideClick, true);
  document.removeEventListener('keydown', onEscape, true);
}
@@ -228,12 +245,6 @@
    if (ch) showPopover(ch, e.clientX, e.clientY);
  });

  feed.addEventListener('contextmenu', function(e) {
    var item = e.target.closest('.live-feed-item');
    if (!item || !item._ccChannel) return;
    e.preventDefault();
    showPopover(item._ccChannel, e.clientX, e.clientY);
  });
}

/**

@@ -15,6 +15,7 @@ window.ChannelDecrypt = (function () {
  'use strict';

  var STORAGE_KEY = 'corescope_channel_keys';
  var LABELS_KEY = 'corescope_channel_labels';
  var CACHE_KEY = 'corescope_channel_cache';

  // ---- Hex utilities ----
@@ -37,6 +38,25 @@ window.ChannelDecrypt = (function () {

  // ---- Key derivation ----

  // Detect whether SubtleCrypto is available. SubtleCrypto is only exposed
  // in **secure contexts** (HTTPS or localhost) — when CoreScope is served
  // over plain HTTP, `crypto.subtle` is undefined and any digest/HMAC call
  // throws. We fall back to the vendored pure-JS implementation in
  // public/vendor/sha256-hmac.js. PR #1021 did the same for AES-ECB.
  function hasSubtle() {
    return typeof crypto !== 'undefined' && crypto && crypto.subtle && typeof crypto.subtle.digest === 'function';
  }

  function pureCryptoOrThrow() {
    var host = (typeof window !== 'undefined') ? window
      : (typeof self !== 'undefined') ? self : null;
    if (!host || !host.PureCrypto || !host.PureCrypto.sha256 || !host.PureCrypto.hmacSha256) {
      throw new Error('PureCrypto vendor module not loaded (public/vendor/sha256-hmac.js). ' +
        'crypto.subtle is unavailable (HTTP context) and no fallback present.');
    }
    return host.PureCrypto;
  }

  /**
   * Derive AES-128 key from channel name: SHA-256("#channelname")[:16].
   * @param {string} channelName - e.g. "#LongFast"
@@ -44,8 +64,12 @@ window.ChannelDecrypt = (function () {
   */
  async function deriveKey(channelName) {
    var enc = new TextEncoder();
    var data = enc.encode(channelName);
    if (hasSubtle()) {
      var hash = await crypto.subtle.digest('SHA-256', data);
      return new Uint8Array(hash).slice(0, 16);
    }
    return pureCryptoOrThrow().sha256(data).slice(0, 16);
  }
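
  // Hedged usage sketch (channel name illustrative): the derived key is a
  // 16-byte Uint8Array, i.e. the AES-128 PSK for that channel.
  //
  //   deriveKey('#LongFast').then(function (key) {
  //     console.log(key.length); // 16, over HTTPS and HTTP alike
  //   });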
||||
|
||||
/**
|
||||
@@ -54,46 +78,41 @@ window.ChannelDecrypt = (function () {
|
||||
* @returns {Promise<number>} single byte (0-255)
|
||||
*/
|
||||
async function computeChannelHash(key) {
|
||||
var hash = await crypto.subtle.digest('SHA-256', key);
|
||||
return new Uint8Array(hash)[0];
|
||||
if (hasSubtle()) {
|
||||
var hash = await crypto.subtle.digest('SHA-256', key);
|
||||
return new Uint8Array(hash)[0];
|
||||
}
|
||||
return pureCryptoOrThrow().sha256(key)[0];
|
||||
}
|
||||
|
||||
// ---- AES-128-ECB via Web Crypto (CBC with zero IV, block-by-block) ----
|
||||
// ---- AES-128-ECB via vendored pure-JS implementation ----
|
||||
//
|
||||
// Web Crypto exposes AES-CBC/CTR/GCM but NOT raw AES-ECB. The previous
|
||||
// implementation simulated ECB with AES-CBC + zero IV + a dummy PKCS7
|
||||
// padding block; that hack throws OperationError on real ciphertext
|
||||
// because Web Crypto validates PKCS7 padding on the decrypted output
|
||||
// and the dummy padding bytes rarely form a valid PKCS7 sequence
|
||||
// after decryption. We use a pure-JS AES-128 ECB core
|
||||
// (public/vendor/aes-ecb.js, MIT, derived from aes-js by Richard
|
||||
// Moore) so decryption is deterministic across browsers and works in
|
||||
// HTTP contexts.
|
||||
|
||||
/**
|
||||
* Decrypt AES-128-ECB by decrypting each 16-byte block independently
|
||||
* using AES-CBC with a zero IV (equivalent to ECB for single blocks).
|
||||
* Decrypt AES-128-ECB.
|
||||
* @param {Uint8Array} key - 16-byte AES key
|
||||
* @param {Uint8Array} ciphertext - must be multiple of 16 bytes
|
||||
* @returns {Promise<Uint8Array>} plaintext
|
||||
* @param {Uint8Array} ciphertext - must be a non-zero multiple of 16 bytes
|
||||
* @returns {Promise<Uint8Array|null>} plaintext, or null on invalid input
|
||||
*/
|
||||
async function decryptECB(key, ciphertext) {
|
||||
if (ciphertext.length === 0 || ciphertext.length % 16 !== 0) {
|
||||
if (!ciphertext || ciphertext.length === 0 || ciphertext.length % 16 !== 0) {
|
||||
return null;
|
||||
}
|
||||
var cryptoKey = await crypto.subtle.importKey(
|
||||
'raw', key, { name: 'AES-CBC' }, false, ['decrypt']
|
||||
);
|
||||
var zeroIV = new Uint8Array(16);
|
||||
var plaintext = new Uint8Array(ciphertext.length);
|
||||
|
||||
for (var i = 0; i < ciphertext.length; i += 16) {
|
||||
var block = ciphertext.slice(i, i + 16);
|
||||
// Append a dummy block (16 bytes of 0x10 = PKCS7 padding for empty next block)
|
||||
// so Web Crypto doesn't complain about padding
|
||||
var padded = new Uint8Array(32);
|
||||
padded.set(block, 0);
|
||||
// Second block is PKCS7 padding: 16 bytes of 0x10
|
||||
for (var j = 16; j < 32; j++) padded[j] = 16;
|
||||
|
||||
var decrypted = await crypto.subtle.decrypt(
|
||||
{ name: 'AES-CBC', iv: zeroIV }, cryptoKey, padded
|
||||
);
|
||||
var decBytes = new Uint8Array(decrypted);
|
||||
plaintext.set(decBytes.slice(0, 16), i);
|
||||
var host = (typeof window !== 'undefined') ? window
|
||||
: (typeof self !== 'undefined') ? self : null;
|
||||
if (!host || !host.AES_ECB || !host.AES_ECB.decrypt) {
|
||||
throw new Error('AES_ECB vendor module not loaded (public/vendor/aes-ecb.js)');
|
||||
}
|
||||
|
||||
return plaintext;
|
||||
return host.AES_ECB.decrypt(key, ciphertext);
|
||||
}
|
||||
|
||||
// ---- MAC verification ----
|
||||
@@ -111,13 +130,17 @@ window.ChannelDecrypt = (function () {
|
||||
secret.set(key, 0);
|
||||
// remaining 16 bytes are already 0
|
||||
|
||||
var cryptoKey = await crypto.subtle.importKey(
|
||||
'raw', secret, { name: 'HMAC', hash: 'SHA-256' }, false, ['sign']
|
||||
);
|
||||
var sig = await crypto.subtle.sign('HMAC', cryptoKey, ciphertext);
|
||||
var sigBytes = new Uint8Array(sig);
|
||||
|
||||
var macBytes = hexToBytes(macHex);
|
||||
var sigBytes;
|
||||
if (hasSubtle() && typeof crypto.subtle.importKey === 'function' && typeof crypto.subtle.sign === 'function') {
|
||||
var cryptoKey = await crypto.subtle.importKey(
|
||||
'raw', secret, { name: 'HMAC', hash: 'SHA-256' }, false, ['sign']
|
||||
);
|
||||
var sig = await crypto.subtle.sign('HMAC', cryptoKey, ciphertext);
|
||||
sigBytes = new Uint8Array(sig);
|
||||
} else {
|
||||
sigBytes = pureCryptoOrThrow().hmacSha256(secret, ciphertext);
|
||||
}
|
||||
return sigBytes[0] === macBytes[0] && sigBytes[1] === macBytes[1];
|
||||
}
|
||||
|
||||
@@ -187,12 +210,96 @@ window.ChannelDecrypt = (function () {
|
||||
  // Alias used by channels.js
  var decryptPacket = decrypt;

+  // ---- Live PSK decrypt (WS path) ----
+  //
+  // Build a Map<channelHashByte, { channelName, keyBytes, keyHex }> from all
+  // stored PSK keys so the WebSocket handler can do an O(1) lookup on each
+  // incoming GRP_TXT packet. Hash byte derivation is async, so we cache the
+  // map between calls and only rebuild when the stored-keys set changes.
+  var _keyMapCache = null;
+  var _keyMapSig = '';
+
+  function _keysSignature(keys) {
+    var names = Object.keys(keys).sort();
+    var sig = '';
+    for (var i = 0; i < names.length; i++) {
+      sig += names[i] + '=' + keys[names[i]] + ';';
+    }
+    return sig;
+  }
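
The signature is simply the sorted name=hex pairs joined with semicolons, so any key addition, removal, or change yields a different string. Illustrative (toy values):

    // _keysSignature({ b: '22', a: '11' }) === 'a=11;b=22;'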

+  async function buildKeyMap() {
+    var keys = getKeys();
+    var sig = _keysSignature(keys);
+    if (_keyMapCache && _keyMapSig === sig) return _keyMapCache;
+    var map = new Map();
+    var names = Object.keys(keys);
+    for (var i = 0; i < names.length; i++) {
+      var channelName = names[i];
+      var keyHex = keys[channelName];
+      if (!keyHex || typeof keyHex !== 'string') continue;
+      var keyBytes;
+      try { keyBytes = hexToBytes(keyHex); } catch (e) { continue; }
+      if (keyBytes.length !== 16) continue;
+      var hashByte;
+      try { hashByte = await computeChannelHash(keyBytes); } catch (e) { continue; }
+      // First-write-wins on collision (rare): different channel names can
+      // hash to the same byte. The downstream MAC check still gates rendering.
+      if (!map.has(hashByte)) {
+        map.set(hashByte, { channelName: channelName, keyBytes: keyBytes, keyHex: keyHex });
+      }
+    }
+    _keyMapCache = map;
+    _keyMapSig = sig;
+    return map;
+  }
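
A caller resolves the map once per packet (cheap after the first build thanks to the cache) and indexes it by the packet's channel-hash byte. Sketch (the hash byte is a placeholder):

    var keyMap = await buildKeyMap();
    var entry = keyMap.get(0x3f); // → { channelName, keyBytes, keyHex } or undefined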

+  /**
+   * Attempt to decrypt a live GRP_TXT payload using a prebuilt key map.
+   * Returns { sender, text, channelName, channelHashByte } on success,
+   * or null when no key matches, MAC verification fails, or the payload
+   * is not an encrypted GRP_TXT.
+   */
+  async function tryDecryptLive(payload, keyMap) {
+    if (!payload || payload.type !== 'GRP_TXT') return null;
+    if (!payload.encryptedData || !payload.mac) return null;
+    if (!keyMap || typeof keyMap.get !== 'function') return null;
+    var hashByte = payload.channelHash;
+    // channelHash arrives as either a number or a hex string in some paths;
+    // normalize to number so Map.get hits.
+    if (typeof hashByte === 'string') {
+      var n = parseInt(hashByte, 16);
+      if (!isFinite(n)) return null;
+      hashByte = n;
+    }
+    if (typeof hashByte !== 'number') return null;
+    var entry = keyMap.get(hashByte);
+    if (!entry) return null;
+    var result;
+    try {
+      result = await decrypt(entry.keyBytes, payload.mac, payload.encryptedData);
+    } catch (e) { return null; }
+    if (!result) return null;
+    return {
+      sender: result.sender || 'Unknown',
+      text: result.message || '',
+      channelName: entry.channelName,
+      channelHashByte: hashByte,
+      timestamp: result.timestamp || null
+    };
+  }
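
The intended wiring on the WebSocket side is to try every incoming payload against the cached map and render only on success. Sketch (the handler shape and render hook are illustrative):

    ws.onmessage = async function (ev) {
      var payload = JSON.parse(ev.data);
      var msg = await tryDecryptLive(payload, await buildKeyMap());
      if (msg) renderChannelMessage(msg); // hypothetical render hook
    };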


  // ---- Key storage (localStorage) ----

-  function saveKey(channelName, keyHex) {
+  function saveKey(channelName, keyHex, label) {
    var keys = getKeys();
    keys[channelName] = keyHex;
    try { localStorage.setItem(STORAGE_KEY, JSON.stringify(keys)); } catch (e) { /* quota */ }
+    _keyMapCache = null; // invalidate live-decrypt index
+    if (typeof label === 'string' && label.trim()) {
+      saveLabel(channelName, label.trim());
+    }
  }
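
Example call from an add-key UI (all values are placeholders; the psk:<hex8> naming follows the display convention noted under the labels section below):

    saveKey('psk:00112233', '00112233445566778899aabbccddeeff', 'Bay Area Chat');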

  // Alias used by channels.js
@@ -212,8 +319,39 @@ window.ChannelDecrypt = (function () {
    var keys = getKeys();
    delete keys[channelName];
    try { localStorage.setItem(STORAGE_KEY, JSON.stringify(keys)); } catch (e) { /* quota */ }
-    // Also clear cached messages for this channel
+    _keyMapCache = null; // invalidate live-decrypt index
+    // Also clear cached messages and any label for this channel (#1020)
    clearChannelCache(channelName);
+    var labels = getLabels();
+    if (labels[channelName]) {
+      delete labels[channelName];
+      try { localStorage.setItem(LABELS_KEY, JSON.stringify(labels)); } catch (e) { /* quota */ }
+    }
  }

+  // ---- User-supplied display labels (#1020) ----
+  // Stored separately from keys so we can display friendly names instead of
+  // psk:<hex8> for user-added PSK channels.
+  function getLabels() {
+    try {
+      var raw = localStorage.getItem(LABELS_KEY);
+      return raw ? JSON.parse(raw) : {};
+    } catch (e) { return {}; }
+  }
+
+  function getLabel(channelName) {
+    var labels = getLabels();
+    return labels[channelName] || '';
+  }
+
+  function saveLabel(channelName, label) {
+    var labels = getLabels();
+    if (typeof label === 'string' && label.trim()) {
+      labels[channelName] = label.trim();
+    } else {
+      delete labels[channelName];
+    }
+    try { localStorage.setItem(LABELS_KEY, JSON.stringify(labels)); } catch (e) { /* quota */ }
+  }

  /** Remove cached messages for a specific channel (by name or hash). */
@@ -286,10 +424,16 @@ window.ChannelDecrypt = (function () {
    getKeys: getKeys,
    getStoredKeys: getStoredKeys,
    removeKey: removeKey,
+    // #1020: optional user-friendly display labels for stored keys
+    saveLabel: saveLabel,
+    getLabel: getLabel,
+    getLabels: getLabels,
    clearChannelCache: clearChannelCache,
    cacheMessages: cacheMessages,
    getCachedMessages: getCachedMessages,
    setCache: setCache,
-    getCache: getCache
+    getCache: getCache,
+    buildKeyMap: buildKeyMap,
+    tryDecryptLive: tryDecryptLive
  };
})();

@@ -0,0 +1,280 @@
+/**
+ * channel-qr.js — QR code generation + scanning for MeshCore channels.
+ *
+ * URL format (per firmware spec):
+ *   meshcore://channel/add?name=<urlencoded>&secret=<32hex>
+ *
+ * Public API (window.ChannelQR):
+ *   buildUrl(name, secretHex) → string
+ *   parseChannelUrl(url) → {name, secret} | null
+ *   generate(name, secretHex, target) → renders QR + URL + Copy Key into `target`
+ *   scan() → Promise<{name, secret} | null>
+ *
+ * Self-contained: does NOT touch channels.js / channel-decrypt.js.
+ * The PR that wires the modal into this module is #3.
+ *
+ * Vendored deps (loaded by index.html):
+ *   - public/vendor/qrcode.js (davidshimjs/qrcodejs, MIT) — QR rendering
+ *   - public/vendor/jsqr.min.js (cozmo/jsQR, Apache-2.0) — QR decoding from camera
+ */
+(function (root) {
+  'use strict';
+
+  const SCHEME_PREFIX = 'meshcore://channel/add';
+  const HEX32_RE = /^[0-9a-fA-F]{32}$/;
+
+  function buildUrl(name, secretHex) {
+    return SCHEME_PREFIX + '?name=' + encodeURIComponent(String(name)) +
+      '&secret=' + String(secretHex);
+  }
+
+  /**
+   * parseChannelUrl(url) → { name, secret } | null
+   * Strict: scheme must be `meshcore:`, host+path `//channel/add`,
+   * both `name` and `secret` query params present, secret must be 32 hex chars.
+   */
+  function parseChannelUrl(url) {
+    if (!url || typeof url !== 'string') return null;
+    if (url.indexOf(SCHEME_PREFIX) !== 0) return null;
+
+    // Strip prefix → query string
+    const rest = url.slice(SCHEME_PREFIX.length);
+    if (rest[0] !== '?' && rest !== '') return null;
+    const qs = rest.slice(1);
+    if (!qs) return null;
+
+    const params = {};
+    const pairs = qs.split('&');
+    for (let i = 0; i < pairs.length; i++) {
+      const eq = pairs[i].indexOf('=');
+      if (eq < 0) continue;
+      const k = pairs[i].slice(0, eq);
+      const v = pairs[i].slice(eq + 1);
+      try { params[k] = decodeURIComponent(v); }
+      catch (_e) { return null; }
+    }
+
+    if (!params.name || !params.secret) return null;
+    if (!HEX32_RE.test(params.secret)) return null;
+
+    return { name: params.name, secret: params.secret.toLowerCase() };
+  }
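
A round trip through the two helpers (the secret is a placeholder, not a real key):

    var url = buildUrl('My Channel', '00112233445566778899aabbccddeeff');
    // 'meshcore://channel/add?name=My%20Channel&secret=00112233445566778899aabbccddeeff'
    parseChannelUrl(url); // → { name: 'My Channel', secret: '00112233445566778899aabbccddeeff' }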
+
+  // ---------- DOM helpers (browser-only) ----------
+
+  function _hasDom() {
+    return typeof document !== 'undefined' && document.createElement;
+  }
+
+  /**
+   * Render QR + URL + Copy Key button into `target`.
+   *
+   * Uses the vendored Kazuhiko Arase qrcode-generator library (lowercase
+   * `qrcode` global) — `public/vendor/qrcode.js`. This was previously
+   * checking for `root.QRCode` (capital), which never existed and made
+   * every Generate click fall through to "[QR library not loaded]".
+   * (Issue #1087 bug 1.)
+   */
+  function generate(name, secretHex, target, opts) {
+    if (!_hasDom() || !target) return;
+    target.innerHTML = '';
+    opts = opts || {};
+    var qrOnly = !!opts.qrOnly;
+
+    const url = buildUrl(name, secretHex);
+
+    const qrBox = document.createElement('div');
+    qrBox.className = 'channel-qr-canvas';
+    qrBox.style.display = 'inline-block';
+    target.appendChild(qrBox);
+
+    var qrFactory = (typeof root.qrcode === 'function') ? root.qrcode :
+      (typeof root.QRCode === 'function') ? root.QRCode : null;
+
+    if (qrFactory) {
+      try {
+        // Kazuhiko Arase API: qrcode(typeNumber, errorCorrectionLevel)
+        // typeNumber=0 → auto-detect smallest version that fits.
+        var qr = qrFactory(0, 'M');
+        qr.addData(url);
+        qr.make();
+        // createImgTag(cellSize, margin) → an <img src="data:image/gif;base64,...">.
+        // Cell size 4 with margin 4 yields a ~192px image for short URLs.
+        qrBox.innerHTML = qr.createImgTag(4, 4);
+        var img = qrBox.querySelector('img');
+        if (img) {
+          img.alt = 'QR for ' + name;
+          img.style.display = 'block';
+          img.style.maxWidth = '192px';
+          img.style.height = 'auto';
+        }
+      } catch (e) {
+        qrBox.textContent = '[QR render failed: ' + (e && e.message || e) + ']';
+      }
+    } else {
+      qrBox.textContent = '[QR library not loaded]';
+    }
+
+    // #1101: in qrOnly mode (Share modal), the host renders the hex
+    // key field + Copy button BELOW the QR. Skip the inline URL line
+    // and inline Copy Key button here so the QR box contains JUST the
+    // QR image — no overlap, no redundant affordances.
+    if (qrOnly) return;
+
+    const urlLine = document.createElement('div');
+    urlLine.className = 'channel-qr-url';
+    urlLine.style.cssText = 'font-family:monospace;font-size:11px;word-break:break-all;margin-top:6px;';
+    urlLine.textContent = url;
+    target.appendChild(urlLine);
+
+    const copyBtn = document.createElement('button');
+    copyBtn.type = 'button';
+    copyBtn.className = 'channel-qr-copy';
+    copyBtn.textContent = '📋 Copy Key';
+    copyBtn.style.cssText = 'margin-top:6px;';
+    copyBtn.addEventListener('click', function () {
+      const text = secretHex;
+      const done = function () {
+        const orig = copyBtn.textContent;
+        copyBtn.textContent = '✓ Copied';
+        setTimeout(function () { copyBtn.textContent = orig; }, 1200);
+      };
+      if (root.navigator && root.navigator.clipboard && root.navigator.clipboard.writeText) {
+        root.navigator.clipboard.writeText(text).then(done, function () {
+          // Fallback: select text in a temp input
+          _fallbackCopy(text); done();
+        });
+      } else {
+        _fallbackCopy(text); done();
+      }
+    });
+    target.appendChild(copyBtn);
+  }
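
Typical host-page call, rendering into an existing container (the element id is illustrative):

    ChannelQR.generate('My Channel', '00112233445566778899aabbccddeeff',
      document.getElementById('qrTarget'), { qrOnly: true });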
+
+  function _fallbackCopy(text) {
+    if (!_hasDom()) return;
+    const ta = document.createElement('textarea');
+    ta.value = text;
+    ta.style.cssText = 'position:fixed;opacity:0;';
+    document.body.appendChild(ta);
+    ta.select();
+    try { document.execCommand('copy'); } catch (_e) {}
+    document.body.removeChild(ta);
+  }
+
+  // ---------- Camera scan ----------
+
+  /**
+   * scan() → Promise<{name, secret} | null>
+   *
+   * Opens a small modal with a live camera preview, decodes via jsQR,
+   * resolves with the parsed channel info on first valid match. Closes
+   * camera on resolve/reject. Resolves with `null` if user cancels or
+   * camera permission is denied (graceful fallback path).
+   */
+  function scan() {
+    if (!_hasDom()) return Promise.resolve(null);
+    const nav = root.navigator;
+    if (!nav || !nav.mediaDevices || !nav.mediaDevices.getUserMedia ||
+        typeof root.jsQR !== 'function') {
+      _showCameraFallback();
+      return Promise.resolve(null);
+    }
+
+    return new Promise(function (resolve) {
+      const overlay = document.createElement('div');
+      overlay.className = 'channel-qr-scan-overlay';
+      overlay.style.cssText = 'position:fixed;inset:0;background:rgba(0,0,0,0.85);' +
+        'display:flex;flex-direction:column;align-items:center;justify-content:center;z-index:99999;';
+
+      const video = document.createElement('video');
+      video.setAttribute('playsinline', 'true');
+      video.style.cssText = 'max-width:90vw;max-height:60vh;background:#000;';
+      overlay.appendChild(video);
+
+      const status = document.createElement('div');
+      status.style.cssText = 'color:#fff;margin-top:12px;font-family:sans-serif;';
+      status.textContent = 'Point camera at a MeshCore channel QR…';
+      overlay.appendChild(status);
+
+      const cancelBtn = document.createElement('button');
+      cancelBtn.type = 'button';
+      cancelBtn.textContent = 'Cancel';
+      cancelBtn.style.cssText = 'margin-top:12px;';
+      overlay.appendChild(cancelBtn);
+
+      document.body.appendChild(overlay);
+
+      const canvas = document.createElement('canvas');
+      const ctx = canvas.getContext('2d');
+      let stream = null;
+      let rafId = 0;
+      let done = false;
+
+      function cleanup(result) {
+        if (done) return;
+        done = true;
+        if (rafId) cancelAnimationFrame(rafId);
+        if (stream) {
+          stream.getTracks().forEach(function (t) { try { t.stop(); } catch (_e) {} });
+        }
+        if (overlay.parentNode) overlay.parentNode.removeChild(overlay);
+        resolve(result);
+      }
+
+      cancelBtn.addEventListener('click', function () { cleanup(null); });
+
+      function tick() {
+        if (done) return;
+        if (video.readyState === video.HAVE_ENOUGH_DATA) {
+          canvas.width = video.videoWidth;
+          canvas.height = video.videoHeight;
+          ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
+          let imgData;
+          try { imgData = ctx.getImageData(0, 0, canvas.width, canvas.height); }
+          catch (_e) { rafId = requestAnimationFrame(tick); return; }
+          const code = root.jsQR(imgData.data, imgData.width, imgData.height, {
+            inversionAttempts: 'dontInvert',
+          });
+          if (code && code.data) {
+            const parsed = parseChannelUrl(code.data);
+            if (parsed) { cleanup(parsed); return; }
+            status.textContent = 'QR found but not a MeshCore channel — keep trying…';
+          }
+        }
+        rafId = requestAnimationFrame(tick);
+      }
+
+      nav.mediaDevices.getUserMedia({ video: { facingMode: 'environment' } })
+        .then(function (s) {
+          stream = s;
+          video.srcObject = s;
+          video.play().then(function () { tick(); }, function () { tick(); });
+        })
+        .catch(function () {
+          status.textContent = 'Camera not available — paste key manually.';
+          setTimeout(function () { cleanup(null); }, 1800);
+        });
+    });
+  }
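
Host pages consume the promise directly; on success the parsed name and secret can be handed to the key store. Sketch (the channel-naming scheme shown is illustrative):

    ChannelQR.scan().then(function (res) {
      if (res) window.ChannelDecrypt.saveKey('psk:' + res.secret.slice(0, 8), res.secret, res.name);
    });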
+
+  function _showCameraFallback() {
+    if (!_hasDom()) return;
+    const note = document.createElement('div');
+    note.className = 'channel-qr-fallback';
+    note.style.cssText = 'position:fixed;bottom:20px;left:50%;transform:translateX(-50%);' +
+      'background:#222;color:#fff;padding:10px 14px;border-radius:6px;z-index:99999;';
+    note.textContent = 'Camera not available — paste key manually.';
+    document.body.appendChild(note);
+    setTimeout(function () {
+      if (note.parentNode) note.parentNode.removeChild(note);
+    }, 2500);
+  }
+
+  root.ChannelQR = {
+    buildUrl: buildUrl,
+    parseChannelUrl: parseChannelUrl,
+    generate: generate,
+    scan: scan,
+  };
+})(typeof window !== 'undefined' ? window : globalThis);

[File diff suppressed because it is too large: +738 −135]

[Next file diff: +91 −7]
@@ -23,8 +23,58 @@ function comparePacketSets(hashesA, hashesB) {
  return { onlyA: onlyA, onlyB: onlyB, both: both };
}

+/**
+ * Filter packets by route type.
+ * mode: 'all' | 'flood' | 'direct'
+ * Flood = route_type 0 (TransportFlood) or 1 (Flood)
+ * Direct = route_type 2 (Direct) or 3 (TransportDirect)
+ */
+function filterPacketsByRoute(packets, mode) {
+  if (!packets || mode === 'all') return packets || [];
+  if (mode === 'flood') {
+    return packets.filter(function (p) { return p.route_type === 0 || p.route_type === 1; });
+  }
+  if (mode === 'direct') {
+    return packets.filter(function (p) { return p.route_type === 2 || p.route_type === 3; });
+  }
+  return packets;
+}
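
Illustrative call (toy packets):

    filterPacketsByRoute([{ route_type: 1 }, { route_type: 2 }], 'flood');
    // → [{ route_type: 1 }]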
+
+/**
+ * Compute asymmetric overlap statistics between two observer packet sets.
+ * Given a comparePacketSets() result, returns:
+ *  - totalA / totalB: unique packet count for each observer
+ *  - shared: packets seen by both
+ *  - onlyA / onlyB: exclusive packet counts
+ *  - aSeesOfB: percentage of B's packets that A also saw (rounded to 0.1%)
+ *  - bSeesOfA: percentage of A's packets that B also saw (rounded to 0.1%)
+ * Returns 0% (not NaN) when a denominator is zero.
+ */
+function computeOverlapStats(cmp) {
+  var onlyA = (cmp && cmp.onlyA && cmp.onlyA.length) || 0;
+  var onlyB = (cmp && cmp.onlyB && cmp.onlyB.length) || 0;
+  var shared = (cmp && cmp.both && cmp.both.length) || 0;
+  var totalA = onlyA + shared;
+  var totalB = onlyB + shared;
+  var aSeesOfB = totalB > 0 ? Math.round((shared / totalB) * 1000) / 10 : 0;
+  var bSeesOfA = totalA > 0 ? Math.round((shared / totalA) * 1000) / 10 : 0;
+  return {
+    totalA: totalA,
+    totalB: totalB,
+    shared: shared,
+    onlyA: onlyA,
+    onlyB: onlyB,
+    aSeesOfB: aSeesOfB,
+    bSeesOfA: bSeesOfA,
+  };
+}
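
Worked example with invented counts: if A exclusively saw 20 packets, B exclusively saw 60, and 80 were shared, then totalA = 100, totalB = 140, aSeesOfB = 80/140 ≈ 57.1%, and bSeesOfA = 80/100 = 80%:

    computeOverlapStats({ onlyA: new Array(20), onlyB: new Array(60), both: new Array(80) });
    // → { totalA: 100, totalB: 140, shared: 80, onlyA: 20, onlyB: 60,
    //     aSeesOfB: 57.1, bSeesOfA: 80 }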

// Expose for testing
-if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
+if (typeof window !== 'undefined') {
+  window.comparePacketSets = comparePacketSets;
+  window.filterPacketsByRoute = filterPacketsByRoute;
+  window.computeOverlapStats = computeOverlapStats;
+}

(function () {
  var PAYLOAD_LABELS = { 0: 'Request', 1: 'Response', 2: 'Direct Msg', 3: 'ACK', 4: 'Advert', 5: 'Channel Msg', 7: 'Anon Req', 8: 'Path', 9: 'Trace', 11: 'Control' };
@@ -36,6 +86,7 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
  var packetsA = [];
  var packetsB = [];
  var currentView = 'summary';
+  var routeFilter = 'all';

  function init(app, routeParam) {
    // Parse preselected observers from URL: #/compare?a=ID1&b=ID2
@@ -47,6 +98,7 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
    packetsA = [];
    packetsB = [];
    currentView = 'summary';
+    routeFilter = 'all';

    app.innerHTML = '<div class="compare-page" style="padding:16px">' +
      '<div class="page-header" style="display:flex;align-items:center;gap:12px;margin-bottom:16px">' +
@@ -76,6 +128,7 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
    comparisonResult = null;
    packetsA = [];
    packetsB = [];
+    routeFilter = 'all';
  }

  async function loadObservers() {
@@ -115,6 +168,14 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
      '<select id="compareObsB" class="compare-select">' + optionsHtml + '</select>' +
      '</div>' +
      '<button id="compareBtn" class="compare-btn" disabled>Compare</button>' +
+      '<div class="compare-select-group">' +
+      '<label for="compareRouteFilter">Packet Type</label>' +
+      '<select id="compareRouteFilter" class="compare-select">' +
+      '<option value="all">All packets</option>' +
+      '<option value="flood">Flood only</option>' +
+      '<option value="direct">Direct only</option>' +
+      '</select>' +
+      '</div>' +
      '</div>';

    var ddA = document.getElementById('compareObsA');
@@ -124,6 +185,13 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
    if (selA) ddA.value = selA;
    if (selB) ddB.value = selB;

+    var ddRoute = document.getElementById('compareRouteFilter');
+    ddRoute.value = routeFilter;
+    ddRoute.addEventListener('change', function () {
+      routeFilter = ddRoute.value;
+      if (comparisonResult) runComparison();
+    });
+
    function updateBtn() {
      selA = ddA.value || null;
      selB = ddB.value || null;
@@ -162,16 +230,20 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
    packetsA = results[0].packets || [];
    packetsB = results[1].packets || [];

-    var hashesA = new Set(packetsA.map(function (p) { return p.hash; }));
-    var hashesB = new Set(packetsB.map(function (p) { return p.hash; }));
+    // Apply flood/direct filter (#928)
+    var filteredA = filterPacketsByRoute(packetsA, routeFilter);
+    var filteredB = filterPacketsByRoute(packetsB, routeFilter);
+
+    var hashesA = new Set(filteredA.map(function (p) { return p.hash; }));
+    var hashesB = new Set(filteredB.map(function (p) { return p.hash; }));

    comparisonResult = comparePacketSets(hashesA, hashesB);

    // Build hash→packet lookups for detail rendering
    comparisonResult.packetMapA = new Map();
    comparisonResult.packetMapB = new Map();
-    packetsA.forEach(function (p) { comparisonResult.packetMapA.set(p.hash, p); });
-    packetsB.forEach(function (p) { comparisonResult.packetMapB.set(p.hash, p); });
+    filteredA.forEach(function (p) { comparisonResult.packetMapA.set(p.hash, p); });
+    filteredB.forEach(function (p) { comparisonResult.packetMapB.set(p.hash, p); });

    currentView = 'summary';
    renderComparison();
@@ -296,12 +368,24 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;

    if (currentView === 'summary') {
      // Textual summary
+      var stats = computeOverlapStats(r);
      var total = r.onlyA.length + r.onlyB.length + r.both.length;
      var overlap = total > 0 ? (r.both.length / total * 100).toFixed(1) : '0.0';
      el.innerHTML =
        '<div class="compare-summary-text">' +
-        '<p>In the last 24 hours, <strong>' + nameA + '</strong> saw <strong>' + (r.onlyA.length + r.both.length).toLocaleString() + '</strong> unique packets ' +
-        'and <strong>' + nameB + '</strong> saw <strong>' + (r.onlyB.length + r.both.length).toLocaleString() + '</strong> unique packets.</p>' +
+        '<p>In the last 24 hours, <strong>' + nameA + '</strong> saw <strong>' + stats.totalA.toLocaleString() + '</strong> unique packets ' +
+        'and <strong>' + nameB + '</strong> saw <strong>' + stats.totalB.toLocaleString() + '</strong> unique packets.</p>' +
+        // #671 — asymmetric reference-observer comparison
+        '<div class="compare-asymmetric" style="display:flex;gap:12px;flex-wrap:wrap;margin:12px 0">' +
+        '<div class="compare-asym-card" style="flex:1;min-width:240px;padding:12px;border:1px solid var(--border, #333);border-radius:6px">' +
+        '<div style="font-size:1.6em;font-weight:bold">' + stats.aSeesOfB.toFixed(1) + '%</div>' +
+        '<div class="text-muted">' + nameA + ' saw <strong>' + stats.shared.toLocaleString() + '</strong> of ' + nameB + '\u2019s ' + stats.totalB.toLocaleString() + ' packets</div>' +
+        '</div>' +
+        '<div class="compare-asym-card" style="flex:1;min-width:240px;padding:12px;border:1px solid var(--border, #333);border-radius:6px">' +
+        '<div style="font-size:1.6em;font-weight:bold">' + stats.bSeesOfA.toFixed(1) + '%</div>' +
+        '<div class="text-muted">' + nameB + ' saw <strong>' + stats.shared.toLocaleString() + '</strong> of ' + nameA + '\u2019s ' + stats.totalA.toLocaleString() + ' packets</div>' +
+        '</div>' +
+        '</div>' +
        '<p><strong>' + r.both.length.toLocaleString() + '</strong> packets (' + overlap + '%) were seen by both observers. ' +
        '<strong>' + r.onlyA.length.toLocaleString() + '</strong> were exclusive to ' + nameA + ' and ' +
        '<strong>' + r.onlyB.length.toLocaleString() + '</strong> were exclusive to ' + nameB + '.</p>' +
@@ -33,7 +33,7 @@
    'meshcore-live-heatmap-opacity'
  ];

-  var VALID_SECTIONS = ['branding', 'theme', 'themeDark', 'nodeColors', 'typeColors', 'home', 'timestamps', 'heatmapOpacity', 'liveHeatmapOpacity', 'distanceUnit'];
+  var VALID_SECTIONS = ['branding', 'theme', 'themeDark', 'nodeColors', 'typeColors', 'home', 'timestamps', 'heatmapOpacity', 'liveHeatmapOpacity', 'distanceUnit', 'favorites', 'myNodes'];
  var OBJECT_SECTIONS = ['branding', 'theme', 'themeDark', 'nodeColors', 'typeColors', 'home', 'timestamps'];
  var SCALAR_SECTIONS = ['heatmapOpacity', 'liveHeatmapOpacity'];
  var DISTANCE_UNIT_VALUES = ['km', 'mi', 'auto'];
@@ -53,6 +53,52 @@

  var THEME_COLOR_KEYS = Object.keys(THEME_CSS_MAP).filter(function (k) { return k !== 'font' && k !== 'mono'; });

+  // ── Brand logo swap helper (PR #1137) ──
+  // The default navbar brand logo is an inline <svg class="brand-logo"> so it
+  // inherits page CSS vars (--logo-text / --logo-accent / etc.). When an
+  // operator overrides branding.logoUrl in the customizer they expect a
+  // remote image — swap the inline <svg> for an <img>. Going back to the
+  // default URL or clearing the override swaps the <img> back to the inline
+  // <svg>. Layout dimensions (width=111 height=36) are preserved either way.
+  function _setBrandLogoUrl(url, alt) {
+    var node = document.querySelector('.nav-brand .brand-logo');
+    if (!node) return;
+    if (url) {
+      if (node.tagName.toLowerCase() === 'img') {
+        node.setAttribute('src', url);
+        if (alt != null) node.setAttribute('alt', alt);
+        return;
+      }
+      // swap inline <svg> → <img>
+      var img = document.createElement('img');
+      img.className = 'brand-logo';
+      img.setAttribute('src', url);
+      img.setAttribute('alt', alt || node.getAttribute('aria-label') || 'Brand');
+      img.setAttribute('width', '125');
+      img.setAttribute('height', '36');
+      node.parentNode.replaceChild(img, node);
+    } else {
+      if (node.tagName.toLowerCase() !== 'img') {
+        if (alt != null) node.setAttribute('aria-label', alt);
+        return;
+      }
+      // swap <img> → inline <svg> by clearing the src; here we just keep the
+      // <img> in place because we don't have the SVG markup at runtime
+      // (it lives in index.html). The next page reload restores the inline
+      // SVG. Setting src to the default URL is a graceful intermediate.
+      node.setAttribute('src', 'img/corescope-logo.svg');
+      if (alt != null) node.setAttribute('alt', alt);
+    }
+  }
+  function _setBrandAlt(alt) {
+    var node = document.querySelector('.nav-brand .brand-logo');
+    if (!node) return;
+    if (node.tagName.toLowerCase() === 'img') node.setAttribute('alt', alt);
+    else node.setAttribute('aria-label', alt);
+    var brandLink = document.querySelector('.nav-brand');
+    if (brandLink) brandLink.setAttribute('aria-label', alt + ' home');
+  }

  // ── Presets (copied from v1 customize.js) ──
  var PRESETS = {
    default: {
@@ -313,9 +359,17 @@
  function readOverrides() {
    try {
      var raw = localStorage.getItem(STORAGE_KEY);
-      if (raw == null) return {};
-      var parsed = JSON.parse(raw);
-      if (parsed == null || typeof parsed !== 'object' || Array.isArray(parsed)) return {};
+      var parsed = (raw != null) ? JSON.parse(raw) : {};
+      if (parsed == null || typeof parsed !== 'object' || Array.isArray(parsed)) parsed = {};
+      // Include favorites and claimed nodes from their own localStorage keys
+      try {
+        var favs = JSON.parse(localStorage.getItem('meshcore-favorites') || '[]');
+        if (Array.isArray(favs) && favs.length) parsed.favorites = favs;
+      } catch (e) { /* ignore */ }
+      try {
+        var myNodes = JSON.parse(localStorage.getItem('meshcore-my-nodes') || '[]');
+        if (Array.isArray(myNodes) && myNodes.length) parsed.myNodes = myNodes;
+      } catch (e) { /* ignore */ }
      return parsed;
    } catch (e) {
      return {};
@@ -386,14 +440,28 @@

  function writeOverrides(delta) {
    if (delta == null || typeof delta !== 'object') return;
+    // Extract favorites/myNodes and store in their own localStorage keys
+    if (Array.isArray(delta.favorites)) {
+      try { localStorage.setItem('meshcore-favorites', JSON.stringify(delta.favorites)); } catch (e) { /* ignore */ }
+    }
+    if (Array.isArray(delta.myNodes)) {
+      try { localStorage.setItem('meshcore-my-nodes', JSON.stringify(delta.myNodes)); } catch (e) { /* ignore */ }
+    }
+    // Build theme-only delta (without favorites/myNodes)
+    var themeDelta = {};
+    for (var k in delta) {
+      if (delta.hasOwnProperty(k) && k !== 'favorites' && k !== 'myNodes') {
+        themeDelta[k] = delta[k];
+      }
+    }
    // If empty, remove key entirely
-    var keys = Object.keys(delta);
+    var keys = Object.keys(themeDelta);
    if (keys.length === 0) {
      try { localStorage.removeItem(STORAGE_KEY); } catch (e) { /* ignore */ }
      _updateSaveStatus('saved');
      return;
    }
-    var validated = _validateDelta(delta);
+    var validated = _validateDelta(themeDelta);
    try {
      localStorage.setItem(STORAGE_KEY, JSON.stringify(validated));
      _updateSaveStatus('saved');
@@ -446,7 +514,7 @@
    return window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
  }

-  function applyCSS(effectiveConfig) {
+  function applyCSS(effectiveConfig, userOverrides) {
    var dark = isDarkMode();
    var themeSection = dark
      ? Object.assign({}, effectiveConfig.theme || {}, effectiveConfig.themeDark || {})
@@ -461,6 +529,19 @@
      }
    }

+    // Logo brand colors mirror --accent / --accent-hover ONLY when an
+    // operator has actually overridden them via the customizer. We check
+    // userOverrides (not the merged effective config), so the server-default
+    // accent (#4a9eff) does NOT clobber the sage/teal :root brand defaults
+    // out-of-the-box. When an operator picks a theme, customizer writes the
+    // override to localStorage, the override flows through here, and the
+    // wordmark recolors to follow the chosen accent.
+    var ovTheme = (userOverrides && (dark
+      ? Object.assign({}, userOverrides.theme || {}, userOverrides.themeDark || {})
+      : (userOverrides.theme || {}))) || {};
+    if (ovTheme.accent) root.setProperty('--logo-accent', ovTheme.accent);
+    if (ovTheme.accentHover) root.setProperty('--logo-accent-hi', ovTheme.accentHover);
+
    // Derived vars
    if (themeSection.background) root.setProperty('--content-bg', themeSection.contentBg || themeSection.background);
    if (themeSection.surface1) root.setProperty('--card-bg', themeSection.cardBg || themeSection.surface1);
@@ -522,10 +603,12 @@
    if (br) {
      if (br.siteName) {
        document.title = br.siteName;
+        _setBrandAlt(br.siteName);
        var brandEl = document.querySelector('.brand-text');
        if (brandEl) brandEl.textContent = br.siteName;
      }
      if (br.logoUrl) {
+        _setBrandLogoUrl(br.logoUrl, br.siteName || null);
        var iconEl = document.querySelector('.brand-icon');
        if (iconEl) iconEl.innerHTML = '<img src="' + br.logoUrl + '" style="height:24px" onerror="this.style.display=\'none\'">';
      }
@@ -544,7 +627,7 @@
    var overrides = readOverrides();
    var effective = computeEffective(_serverDefaults || {}, overrides);
    window.SITE_CONFIG = effective;
-    applyCSS(effective);
+    applyCSS(effective, overrides);
  }

  // ── setOverride / clearOverride ──
@@ -758,6 +841,17 @@
      if (key === 'distanceUnit' && DISTANCE_UNIT_VALUES.indexOf(obj[key]) === -1) {
        errors.push('Invalid distanceUnit: "' + obj[key] + '" — must be km, mi, or auto');
      }
+      // Validate favorites and myNodes arrays
+      if (key === 'favorites') {
+        if (!Array.isArray(obj[key])) {
+          errors.push('"favorites" must be an array of public key strings');
+        }
+      }
+      if (key === 'myNodes') {
+        if (!Array.isArray(obj[key])) {
+          errors.push('"myNodes" must be an array of node objects');
+        }
+      }
    }
    return { valid: errors.length === 0, errors: errors };
  }
@@ -1108,6 +1202,9 @@
      '<option value="km"' + (distUnit === 'km' ? ' selected' : '') + '>Kilometers (km)</option>' +
      '<option value="mi"' + (distUnit === 'mi' ? ' selected' : '') + '>Miles (mi)</option>' +
      '</select></div>' +
+      '<p class="cust-section-title" style="font-size:14px;margin:16px 0 8px">Gesture Hints</p>' +
+      '<p style="font-size:12px;color:var(--text-muted);margin-bottom:8px">Re-show first-visit gesture discoverability hints (swipe rows, swipe tabs, edge-swipe drawer, pull-to-refresh).</p>' +
+      '<button type="button" class="cust-dl-btn" data-cv2-reset-hints data-reset-gesture-hints>↺ Reset gesture hints</button>' +
      '</div>';
  }

@@ -1311,6 +1408,9 @@
      // Optimistic CSS update (Decision #12)
      var cssVar = THEME_CSS_MAP[key];
      if (cssVar) document.documentElement.style.setProperty(cssVar, inp.value);
+      // Mirror to logo brand vars so the wordmark recolors live too.
+      if (key === 'accent') document.documentElement.style.setProperty('--logo-accent', inp.value);
+      if (key === 'accentHover') document.documentElement.style.setProperty('--logo-accent-hi', inp.value);
      // Update hex display
      var hex = inp.parentElement.querySelector('.cust-hex');
      if (hex) hex.textContent = inp.value;
@@ -1327,11 +1427,13 @@
      setOverride(section, key, inp.value);
      // Live branding updates
      if (section === 'branding' && key === 'siteName') {
+        _setBrandAlt(inp.value);
        var el = document.querySelector('.brand-text');
        if (el) el.textContent = inp.value;
        document.title = inp.value;
      }
      if (section === 'branding' && key === 'logoUrl') {
+        _setBrandLogoUrl(inp.value || '', null);
        var iconEl = document.querySelector('.brand-icon');
        if (iconEl) {
          if (inp.value) iconEl.innerHTML = '<img src="' + inp.value + '" style="height:24px" onerror="this.style.display=\'none\'">';
@@ -1510,6 +1612,19 @@
      _runPipeline();
      _renderPanel(container);
    });

+    // Reset gesture hints (#1065)
+    var hintsBtn = container.querySelector('[data-cv2-reset-hints]');
+    if (hintsBtn) hintsBtn.addEventListener('click', function () {
+      if (window.GestureHints && typeof window.GestureHints.reset === 'function') {
+        window.GestureHints.reset();
+      } else {
+        // Fallback: clear known keys directly.
+        ['row-swipe', 'tab-swipe', 'edge-drawer', 'pull-refresh'].forEach(function (k) {
+          try { localStorage.removeItem('meshcore-gesture-hints-' + k); } catch (_e) {}
+        });
+      }
+    });
  }

  // ── Panel toggle ──
@@ -1573,6 +1688,13 @@
    for (var key in THEME_CSS_MAP) {
      if (themeSection[key]) root.setProperty(THEME_CSS_MAP[key], themeSection[key]);
    }
+    // Mirror accent → logo brand vars ONLY when present in overrides (so the
+    // server-default accent never clobbers the sage/teal :root brand defaults).
+    var ovTheme = dark
+      ? Object.assign({}, earlyOverrides.theme || {}, earlyOverrides.themeDark || {})
+      : (earlyOverrides.theme || {});
+    if (ovTheme.accent) root.setProperty('--logo-accent', ovTheme.accent);
+    if (ovTheme.accentHover) root.setProperty('--logo-accent-hi', ovTheme.accentHover);
    if (themeSection.background) root.setProperty('--content-bg', themeSection.contentBg || themeSection.background);
    if (themeSection.surface1) root.setProperty('--card-bg', themeSection.cardBg || themeSection.surface1);
    // Apply node/type colors from overrides early
@@ -1599,11 +1721,13 @@
    var overrides = readOverrides();
    if (overrides.branding) {
      if (overrides.branding.siteName) {
+        _setBrandAlt(overrides.branding.siteName);
        var brandEl = document.querySelector('.brand-text');
        if (brandEl) brandEl.textContent = overrides.branding.siteName;
        document.title = overrides.branding.siteName;
      }
      if (overrides.branding.logoUrl) {
+        _setBrandLogoUrl(overrides.branding.logoUrl, overrides.branding.siteName || null);
        var iconEl = document.querySelector('.brand-icon');
        if (iconEl) iconEl.innerHTML = '<img src="' + overrides.branding.logoUrl + '" style="height:24px" onerror="this.style.display=\'none\'">';
      }

@@ -7,6 +7,36 @@
  let originalValues = {};
  let activeTab = 'branding';

+  // ── Brand logo swap helpers (PR #1137) ──
+  // Default brand logo is an inline <svg.brand-logo>; an operator override
+  // (branding.logoUrl) swaps it for an <img.brand-logo>. Going back to empty
+  // restores the inline default on next reload (intermediate state shows the
+  // bundled SVG via <img>). Kept in customize.js for v1 parity.
+  function _v1SetBrandLogoUrl(url) {
+    var node = document.querySelector('.nav-brand .brand-logo');
+    if (!node) return;
+    if (url) {
+      if (node.tagName.toLowerCase() === 'img') { node.setAttribute('src', url); return; }
+      var img = document.createElement('img');
+      img.className = 'brand-logo';
+      img.setAttribute('src', url);
+      img.setAttribute('alt', node.getAttribute('aria-label') || 'Brand');
+      img.setAttribute('width', '111');
+      img.setAttribute('height', '36');
+      node.parentNode.replaceChild(img, node);
+    } else if (node.tagName.toLowerCase() === 'img') {
+      node.setAttribute('src', 'img/corescope-logo.svg');
+    }
+  }
+  function _v1SetBrandAlt(alt) {
+    var node = document.querySelector('.nav-brand .brand-logo');
+    if (!node) return;
+    if (node.tagName.toLowerCase() === 'img') node.setAttribute('alt', alt);
+    else node.setAttribute('aria-label', alt);
+    var brandLink = document.querySelector('.nav-brand');
+    if (brandLink) brandLink.setAttribute('aria-label', alt + ' home');
+  }
+
  const DEFAULTS = {
    branding: {
      siteName: 'CoreScope',
@@ -513,6 +543,9 @@
    for (var key in THEME_CSS_MAP) {
      if (t[key]) document.documentElement.style.setProperty(THEME_CSS_MAP[key], t[key]);
    }
+    // Mirror accent → logo brand vars so the wordmark follows the theme.
+    if (t.accent) document.documentElement.style.setProperty('--logo-accent', t.accent);
+    if (t.accentHover) document.documentElement.style.setProperty('--logo-accent-hi', t.accentHover);
    // Derived vars that reference other vars — need explicit override
    if (t.background) {
      document.documentElement.style.setProperty('--content-bg', t.background);
@@ -1006,11 +1039,18 @@
    }
    // Live DOM updates for branding
    if (inp.dataset.key === 'branding.siteName') {
+      // Post-rebrand (PR #1137): the navbar brand is an inline <svg>;
+      // mutate aria-label (a11y label on the <svg>/<a>) + document title.
+      // Legacy .brand-text fallback retained for any operator who shipped
+      // a custom build that still uses the text node.
+      _v1SetBrandAlt(inp.value);
      var brandEl = document.querySelector('.brand-text');
      if (brandEl) brandEl.textContent = inp.value;
      document.title = inp.value;
    }
    if (inp.dataset.key === 'branding.logoUrl') {
+      // Swap the navbar logo: empty → restore inline default; URL → <img>.
+      _v1SetBrandLogoUrl(inp.value || '');
      var iconEl = document.querySelector('.brand-icon');
      if (iconEl) {
        if (inp.value) { iconEl.innerHTML = '<img src="' + inp.value + '" style="height:24px" onerror="this.style.display=\'none\'">'; }
@@ -1410,6 +1450,9 @@
    for (const [key, val] of Object.entries(themeData)) {
      if (THEME_CSS_MAP[key]) document.documentElement.style.setProperty(THEME_CSS_MAP[key], val);
    }
+    // Mirror accent → logo brand vars (matches applyThemePreview()).
+    if (themeData.accent) document.documentElement.style.setProperty('--logo-accent', themeData.accent);
+    if (themeData.accentHover) document.documentElement.style.setProperty('--logo-accent-hi', themeData.accentHover);
    // Derived vars
    if (themeData.background) document.documentElement.style.setProperty('--content-bg', themeData.background);
    if (themeData.surface1) document.documentElement.style.setProperty('--card-bg', themeData.surface1);
@@ -1441,11 +1484,13 @@
    const userTheme = JSON.parse(saved);
    if (userTheme.branding) {
      if (userTheme.branding.siteName) {
+        _v1SetBrandAlt(userTheme.branding.siteName);
        const brandEl = document.querySelector('.brand-text');
        if (brandEl) brandEl.textContent = userTheme.branding.siteName;
        document.title = userTheme.branding.siteName;
      }
      if (userTheme.branding.logoUrl) {
+        _v1SetBrandLogoUrl(userTheme.branding.logoUrl);
        const iconEl = document.querySelector('.brand-icon');
        if (iconEl) iconEl.innerHTML = '<img src="' + userTheme.branding.logoUrl + '" style="height:24px" onerror="this.style.display=\'none\'">';
      }

@@ -0,0 +1,450 @@
+/* filter-ux.js — Wireshark-style filter UX (issue #966)
+ *
+ * Owns:
+ *  - Help popover (filter syntax, fields, operators, examples)
+ *  - Autocomplete dropdown (field names, operators, type/route values, payload.*)
+ *  - Right-click context menu on packet table cells → "Filter by this value"
+ *  - Saved-filter dropdown (localStorage, with starter defaults)
+ *
+ * Pure-logic helpers (SavedFilters, buildCellFilterClause, appendClauseToExpr)
+ * are unit-tested in test-packet-filter-ux.js. DOM glue is exercised by
+ * test-filter-ux-e2e.js (Playwright).
+ */
+(function() {
+  'use strict';
+
+  var LS_KEY = 'corescope_saved_filters_v1';
+
+  // ── Saved filters store ────────────────────────────────────────────────
+  var DEFAULT_FILTERS = [
+    { name: 'Adverts only', expr: 'type == ADVERT', builtin: true },
+    { name: 'Channel traffic', expr: 'type == GRP_TXT', builtin: true },
+    { name: 'Direct messages', expr: 'type == TXT_MSG', builtin: true },
+    { name: 'Strong signal (SNR > 5)', expr: 'snr > 5', builtin: true },
+    { name: 'Multi-hop (hops > 1)', expr: 'hops > 1', builtin: true },
+    { name: 'Repeater adverts', expr: 'type == ADVERT && payload.flags.repeater == true', builtin: true },
+    { name: 'Recent (last 5 min)', expr: 'age < 5m', builtin: true },
+  ];
+
+  function _getStore() {
+    try {
+      var raw = window.localStorage.getItem(LS_KEY);
+      if (!raw) return [];
+      var parsed = JSON.parse(raw);
+      return Array.isArray(parsed) ? parsed : [];
+    } catch (e) { return []; }
+  }
+  function _setStore(arr) {
+    try { window.localStorage.setItem(LS_KEY, JSON.stringify(arr)); } catch (e) {}
+  }
+
+  var SavedFilters = {
+    defaults: function() { return DEFAULT_FILTERS.slice(); },
+    list: function() {
+      // Defaults first, then user filters (deduped by name — user wins on collision)
+      var user = _getStore();
+      var userNames = {};
+      for (var i = 0; i < user.length; i++) userNames[user[i].name] = true;
+      var defaults = DEFAULT_FILTERS.filter(function(d) { return !userNames[d.name]; });
+      return defaults.concat(user);
+    },
+    save: function(name, expr) {
+      if (!name || !expr) return;
+      var user = _getStore();
+      var idx = -1;
+      for (var i = 0; i < user.length; i++) { if (user[i].name === name) { idx = i; break; } }
+      var entry = { name: name, expr: expr, ts: Date.now() };
+      if (idx >= 0) user[idx] = entry; else user.push(entry);
+      _setStore(user);
+    },
+    delete: function(name) {
+      var user = _getStore();
+      _setStore(user.filter(function(f) { return f.name !== name; }));
+    },
+  };
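
Store usage in a nutshell (the expression is illustrative):

    SavedFilters.save('Weak links', 'snr < 0 && hops > 2');
    SavedFilters.list();            // defaults first, then user entries; user wins on a name clash
    SavedFilters.delete('Weak links');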
+
+  // ── Right-click filter clause builders ─────────────────────────────────
+  // Numeric strings stay unquoted; identifiers from TYPE_VALUES/ROUTE_VALUES
+  // stay unquoted; everything else gets double-quoted.
+  function _isNumericString(s) {
+    if (typeof s !== 'string') return false;
+    return /^-?\d+(\.\d+)?$/.test(s.trim());
+  }
+  function _isBareIdentifier(s) {
+    return typeof s === 'string' && /^[A-Z_][A-Z0-9_]*$/.test(s);
+  }
+  function buildCellFilterClause(field, value, op) {
+    op = op || '==';
+    if (value == null) value = '';
+    var v = String(value);
+    var rendered;
+    if (op === 'contains' || op === 'starts_with' || op === 'ends_with') {
+      // String-only ops: always quote
+      rendered = '"' + v.replace(/"/g, '\\"') + '"';
+    } else if (_isNumericString(v)) {
+      rendered = v;
+    } else if (_isBareIdentifier(v)) {
+      rendered = v;
+    } else {
+      rendered = '"' + v.replace(/"/g, '\\"') + '"';
+    }
+    return field + ' ' + op + ' ' + rendered;
+  }
+  function appendClauseToExpr(expr, clause) {
+    if (!expr || !expr.trim()) return clause;
+    return expr.trim() + ' && ' + clause;
+  }
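
Clause rendering at a glance (values illustrative):

    buildCellFilterClause('type', 'ADVERT');         // 'type == ADVERT'   (bare enum identifier)
    buildCellFilterClause('snr', '7.5', '>');        // 'snr > 7.5'        (numeric string)
    buildCellFilterClause('observer', 'Dorrington'); // 'observer == "Dorrington"'
    appendClauseToExpr('hops > 1', 'snr > 5');       // 'hops > 1 && snr > 5'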
+
+  // ── DOM glue (only runs in browser, after init()) ──────────────────────
+  var _ctxMenu = null;
+
+  function _h(tag, attrs, html) {
+    var el = document.createElement(tag);
+    if (attrs) for (var k in attrs) {
+      if (k === 'class') el.className = attrs[k];
+      else if (k === 'style') el.setAttribute('style', attrs[k]);
+      else if (k.indexOf('data-') === 0) el.setAttribute(k, attrs[k]);
+      else el[k] = attrs[k];
+    }
+    if (html != null) el.innerHTML = html;
+    return el;
+  }
+  function _esc(s) {
+    return String(s == null ? '' : s).replace(/[&<>"']/g, function(c) {
+      return { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[c];
+    });
+  }
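
Each HTML-significant character maps to its named or numeric entity so untrusted strings can be interpolated into innerHTML. Illustrative:

    _esc('a < b && "c"'); // 'a &lt; b &amp;&amp; &quot;c&quot;'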
+
+  function _buildHelpHtml() {
+    var PF = window.PacketFilter;
+    var rows = (PF.FIELDS || []).map(function(f) {
+      return '<tr><td class="fux-mono">' + _esc(f.name) + '</td><td>' + _esc(f.desc) + '</td></tr>';
+    }).join('');
+    var ops = (PF.OPERATORS || []).map(function(o) {
+      return '<tr><td class="fux-mono">' + _esc(o.op) + '</td><td>' + _esc(o.desc) +
+        '</td><td class="fux-mono">' + _esc(o.example) + '</td></tr>';
+    }).join('');
+    var examples = [
+      'type == ADVERT',
+      'type == GRP_TXT && size > 50',
+      'payload.name contains "Gilroy"',
+      'payload.flags.repeater == true',
+      'snr > 5 && rssi > -90',
+      'hops < 2',
+      'observer == "Dorrington" && type == ADVERT',
+      '(type == ADVERT || type == ACK) && snr > 0',
+      'age < 1h',
+      'time after "2025-01-01"',
+    ].map(function(e) { return '<li class="fux-mono">' + _esc(e) + '</li>'; }).join('');
+    return [
+      // NOTE(#1122): "Filter syntax" heading is provided by the popover header;
+      // do NOT repeat it here or the panel renders the label twice.
+      '<p>Wireshark-style boolean expressions over packet fields. Combine with <code>&&</code>, <code>||</code>, <code>!</code>, and parentheses. Strings are case-insensitive. Tip: append <code>?filter=…</code> to the URL to share a filter.</p>',
+      '<h4>Fields</h4>',
+      '<table class="fux-table"><thead><tr><th>Name</th><th>Description</th></tr></thead><tbody>' + rows + '</tbody></table>',
+      '<h4>Operators</h4>',
+      '<table class="fux-table"><thead><tr><th>Op</th><th>Meaning</th><th>Example</th></tr></thead><tbody>' + ops + '</tbody></table>',
+      '<h4>Examples</h4>',
+      '<ul class="fux-examples">' + examples + '</ul>',
+      '<h4>Tips</h4>',
+      '<ul>',
+      '<li>Right-click any cell in the packet table to add a clause for that value.</li>',
+      '<li>Type a partial field name to autocomplete; Tab/Enter accepts, Esc dismisses.</li>',
+      '<li>Save commonly-used expressions via the ★ Save button — they appear in the Saved dropdown.</li>',
+      '</ul>',
+    ].join('');
+  }
+
+  function _showHelp() {
+    var existing = document.getElementById('filterHelpPopover');
+    if (existing) {
+      // Toggle: also remove the backdrop wrapper if present
+      var wrap = existing.closest('.modal-overlay');
+      (wrap || existing).remove();
+      return;
+    }
+    // #1122: Render as a real centered modal inside .modal-overlay so the
+    // help panel never floats over the packet table rows.
+    var overlay = _h('div', { class: 'modal-overlay fux-help-overlay', role: 'presentation' });
+    var pop = _h('div', { id: 'filterHelpPopover', class: 'modal fux-popover', role: 'dialog', 'aria-modal': 'true', 'aria-label': 'Filter syntax help' });
+    pop.innerHTML =
+      '<div class="fux-popover-header"><strong>Filter syntax</strong>' +
+      '<button type="button" class="fux-popover-close" aria-label="Close">✕</button></div>' +
+      '<div class="fux-popover-body">' + _buildHelpHtml() + '</div>';
+    overlay.appendChild(pop);
+    document.body.appendChild(overlay);
+    // #1124 (MAJOR-2): focus management. Save the trigger so we can restore
+    // focus on close, then move focus to the close button. Trap Tab cycles
+    // inside the modal until it closes.
+    var trigger = document.activeElement;
+    var closeBtn = pop.querySelector('.fux-popover-close');
+    function _focusables() {
+      return Array.prototype.slice.call(pop.querySelectorAll(
+        'a[href], button:not([disabled]), input:not([disabled]), select:not([disabled]), textarea:not([disabled]), [tabindex]:not([tabindex="-1"])'
+      ));
+    }
+    function close() {
+      overlay.remove();
+      document.removeEventListener('keydown', onKey);
+      // Restore focus to the original trigger if still in the DOM.
+      if (trigger && typeof trigger.focus === 'function' && document.body.contains(trigger)) {
+        try { trigger.focus(); } catch (e) {}
+      }
+    }
+    function onKey(ev) {
+      if (ev.key === 'Escape') { close(); return; }
+      if (ev.key !== 'Tab') return;
+      var f = _focusables();
+      if (!f.length) { ev.preventDefault(); return; }
+      var first = f[0], last = f[f.length - 1];
+      var active = document.activeElement;
+      if (ev.shiftKey) {
+        if (active === first || !pop.contains(active)) { last.focus(); ev.preventDefault(); }
+      } else {
+        if (active === last || !pop.contains(active)) { first.focus(); ev.preventDefault(); }
+      }
+    }
+    closeBtn.addEventListener('click', close);
+    overlay.addEventListener('click', function(ev) {
+      // Click on backdrop (not inside the modal) closes
+      if (ev.target === overlay) close();
+    });
+    document.addEventListener('keydown', onKey);
+    // Move focus to the close button (first interactive element).
+    try { closeBtn.focus(); } catch (e) {}
+  }
+
+  // ── Autocomplete ───────────────────────────────────────────────────────
+  function _wireAutocomplete(input) {
+    var dd = _h('div', { id: 'filterAcDropdown', class: 'fux-ac-dropdown', role: 'listbox' });
+    dd.style.display = 'none';
+    input.parentNode.appendChild(dd);
+    var sel = -1, items = [];
+
+    function _gatherPayloadKeys() {
+      // Best-effort: scan the first ~50 visible packets for decoded_json keys
+      var keys = {};
+      try {
+        var rows = document.querySelectorAll('#pktTable tbody tr');
+        for (var r = 0; r < rows.length && r < 50; r++) {
+          var dj = rows[r].getAttribute('data-decoded');
+          if (!dj) continue;
+          var obj = JSON.parse(dj);
+          for (var k in obj) keys[k] = true;
+        }
+      } catch (e) {}
+      return Object.keys(keys);
+    }
+
+    function close() { dd.style.display = 'none'; sel = -1; items = []; input.removeAttribute('aria-activedescendant'); }
+    function render() {
+      if (!items.length) { close(); return; }
+      dd.innerHTML = items.map(function(it, i) {
+        return '<div class="fux-ac-item' + (i === sel ? ' active' : '') + '" id="fux-ac-' + i +
+          '" role="option" data-idx="' + i + '">' +
+          '<span class="fux-ac-val">' + _esc(it.value) + '</span>' +
+          (it.desc ? '<span class="fux-ac-desc">' + _esc(it.desc) + '</span>' : '') +
+          '</div>';
+      }).join('');
+      dd.style.display = 'block';
+      if (sel >= 0) input.setAttribute('aria-activedescendant', 'fux-ac-' + sel);
+    }
+    function accept(idx) {
+      if (!items[idx]) return;
+      var rs = items._replaceStart, re = items._replaceEnd;
+      var val = items[idx].value;
+      var v = input.value;
+      var newVal = v.slice(0, rs) + val + v.slice(re);
+      var caret = rs + val.length;
+      // Append space + helpful next char for fields (so user can type op)
+      if (items[idx].kind === 'field') { newVal = newVal.slice(0, caret) + ' ' + newVal.slice(caret); caret++; }
+      input.value = newVal;
+      input.setSelectionRange(caret, caret);
+      close();
+      // Trigger filter recompile
+      input.dispatchEvent(new Event('input', { bubbles: true }));
+    }
+
+    function refresh() {
+      var PF = window.PacketFilter;
+      if (!PF || !PF.suggest) return close();
+      var r = PF.suggest(input.value, input.selectionStart || 0, { payloadKeys: _gatherPayloadKeys() });
+      items = (r && r.suggestions) ? r.suggestions.slice(0, 12) : [];
+      items._replaceStart = r ? r.replaceStart : 0;
+      items._replaceEnd = r ? r.replaceEnd : 0;
+      sel = items.length ? 0 : -1;
+      render();
+    }
+    input.addEventListener('input', refresh);
+    input.addEventListener('focus', refresh);
+    input.addEventListener('blur', function() { setTimeout(close, 150); });
+    input.addEventListener('keydown', function(ev) {
+      if (dd.style.display === 'none') return;
+      if (ev.key === 'ArrowDown') { sel = (sel + 1) % items.length; render(); ev.preventDefault(); }
+      else if (ev.key === 'ArrowUp') { sel = (sel - 1 + items.length) % items.length; render(); ev.preventDefault(); }
+      else if (ev.key === 'Tab' || ev.key === 'Enter') {
+        if (sel >= 0) { accept(sel); ev.preventDefault(); }
+      } else if (ev.key === 'Escape') { close(); ev.preventDefault(); }
+    });
+    dd.addEventListener('mousedown', function(ev) {
+      var target = ev.target.closest('.fux-ac-item');
+      if (!target) return;
+      ev.preventDefault();
+      accept(parseInt(target.getAttribute('data-idx'), 10));
+    });
+  }
+
// ── Right-click context menu ───────────────────────────────────────────
|
||||
function _showContextMenu(x, y, field, value) {
|
||||
if (_ctxMenu) { _ctxMenu.remove(); _ctxMenu = null; }
|
||||
var input = document.getElementById('packetFilterInput');
|
||||
if (!input) return;
|
||||
var menu = _h('div', { id: 'filterContextMenu', class: 'fux-ctx-menu', role: 'menu' });
|
||||
var ops = [
|
||||
{ label: 'Filter ' + field + ' == "' + value + '"', op: '==' },
|
||||
{ label: 'Filter ' + field + ' != "' + value + '"', op: '!=' },
|
||||
{ label: 'Filter ' + field + ' contains "' + value + '"', op: 'contains' },
|
||||
];
|
||||
menu.innerHTML = ops.map(function(o, i) {
|
||||
return '<button type="button" class="fux-ctx-item" data-idx="' + i + '" role="menuitem">' + _esc(o.label) + '</button>';
|
||||
}).join('');
|
||||
menu.style.left = x + 'px';
|
||||
menu.style.top = y + 'px';
|
||||
document.body.appendChild(menu);
|
||||
_ctxMenu = menu;
|
||||
menu.addEventListener('click', function(ev) {
|
||||
var btn = ev.target.closest('.fux-ctx-item');
|
||||
if (!btn) return;
|
||||
var op = ops[parseInt(btn.getAttribute('data-idx'), 10)].op;
|
||||
var clause = buildCellFilterClause(field, value, op);
|
||||
input.value = appendClauseToExpr(input.value, clause);
|
||||
input.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
menu.remove(); _ctxMenu = null;
|
||||
});
|
||||
function dismiss(ev) {
|
||||
if (_ctxMenu && !_ctxMenu.contains(ev.target)) { _ctxMenu.remove(); _ctxMenu = null;
|
||||
document.removeEventListener('mousedown', dismiss);
|
||||
document.removeEventListener('keydown', escDismiss);
|
||||
}
|
||||
}
|
||||
function escDismiss(ev) { if (ev.key === 'Escape') dismiss({ target: document.body }); }
|
||||
setTimeout(function() {
|
||||
document.addEventListener('mousedown', dismiss);
|
||||
document.addEventListener('keydown', escDismiss);
|
||||
}, 0);
|
||||
}
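// buildCellFilterClause(field, value, op) and appendClauseToExpr(expr, clause)
// are defined earlier in this file; as used above, a menu pick appends a new
// clause to whatever expression is already typed rather than replacing it.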

function _wireContextMenu() {
  // Delegated listener on the table — extracts field+value from data-* attrs.
  var tbl = document.getElementById('pktTable');
  if (!tbl) return;
  tbl.addEventListener('contextmenu', function(ev) {
    var cell = ev.target.closest('td[data-filter-field]');
    if (!cell) return;
    var field = cell.getAttribute('data-filter-field');
    var value = cell.getAttribute('data-filter-value');
    if (!field || value == null || value === '') return;
    ev.preventDefault();
    _showContextMenu(ev.pageX, ev.pageY, field, value);
  });
}
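// Table cells opt in to the context menu via data attributes; the values here
// are illustrative, the real ones are set by the packet-table renderer:
//   <td data-filter-field="src" data-filter-value="a1b2c3">a1b2c3</td>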

// ── Saved filters dropdown ─────────────────────────────────────────────
function _renderSavedDropdown(container, input) {
  var btn = _h('button', { type: 'button', class: 'fux-saved-trigger', id: 'filterSavedTrigger', title: 'Saved filters' }, '★ Saved ▾');
  var menu = _h('div', { class: 'fux-saved-menu hidden', id: 'filterSavedMenu', role: 'menu' });
  container.appendChild(btn);
  container.appendChild(menu);

  function build() {
    var list = SavedFilters.list();
    var rows = list.map(function(f, i) {
      var del = f.builtin ? '' :
        '<button type="button" class="fux-saved-del" data-name="' + _esc(f.name) + '" title="Delete">✕</button>';
      return '<div class="fux-saved-item" data-idx="' + i + '">' +
        '<span class="fux-saved-name">' + _esc(f.name) + '</span>' +
        '<span class="fux-saved-expr fux-mono">' + _esc(f.expr) + '</span>' +
        del + '</div>';
    }).join('');
    menu.innerHTML =
      '<div class="fux-saved-header">Saved filters</div>' +
      rows +
      '<div class="fux-saved-footer">' +
      '<button type="button" id="filterSaveCurrent" class="fux-saved-save">+ Save current expression</button>' +
      '</div>';
  }

  btn.addEventListener('click', function(ev) {
    ev.stopPropagation();
    build();
    menu.classList.toggle('hidden');
  });
  document.addEventListener('click', function(ev) {
    if (!menu.contains(ev.target) && ev.target !== btn) menu.classList.add('hidden');
  });
  menu.addEventListener('click', function(ev) {
    var del = ev.target.closest('.fux-saved-del');
    if (del) {
      SavedFilters.delete(del.getAttribute('data-name'));
      build();
      ev.stopPropagation();
      return;
    }
    if (ev.target.id === 'filterSaveCurrent') {
      var expr = (input.value || '').trim();
      if (!expr) { alert('Type a filter expression first.'); return; }
      var name = prompt('Name this filter:', '');
      if (name && name.trim()) {
        SavedFilters.save(name.trim(), expr);
        build();
      }
      return;
    }
    var item = ev.target.closest('.fux-saved-item');
    if (item) {
      var list = SavedFilters.list();
      var f = list[parseInt(item.getAttribute('data-idx'), 10)];
      if (f) {
        input.value = f.expr;
        input.dispatchEvent(new Event('input', { bubbles: true }));
        menu.classList.add('hidden');
      }
    }
  });
}
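// Shape of a SavedFilters entry as consumed above (inferred from usage;
// 'builtin' marks presets that cannot be deleted; values illustrative):
//   { name: 'Strong signal', expr: 'snr >= 5', builtin: false }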

// ── Init: idempotent, called by packets.js after filter input renders ──
function init() {
  var input = document.getElementById('packetFilterInput');
  if (!input || input.dataset.fuxInit === '1') return;
  input.dataset.fuxInit = '1';

  // Help icon + saved-filters dropdown — injected next to the input
  var wrap = input.parentNode;
  if (wrap) {
    var bar = document.getElementById('filterUxBar');
    if (!bar) {
      bar = _h('div', { id: 'filterUxBar', class: 'fux-bar' });
      var helpBtn = _h('button', { type: 'button', class: 'fux-help-btn', id: 'filterHelpBtn',
        'aria-label': 'Filter syntax help', title: 'Filter syntax help' }, 'ⓘ Help');
      helpBtn.addEventListener('click', _showHelp);
      bar.appendChild(helpBtn);
      _renderSavedDropdown(bar, input);
      wrap.appendChild(bar);
    }
  }

  _wireAutocomplete(input);
  _wireContextMenu();
}
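// Assumed call pattern, per the comment above: packets.js invokes
// window.FilterUX.init() after each render of #packetFilterInput; the
// dataset.fuxInit guard makes repeated calls a no-op.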

var _exports = {
  SavedFilters: SavedFilters,
  buildCellFilterClause: buildCellFilterClause,
  appendClauseToExpr: appendClauseToExpr,
  init: init,
  _showHelp: _showHelp, // exposed for E2E
};
if (typeof window !== 'undefined') window.FilterUX = _exports;
if (typeof module !== 'undefined' && module.exports) module.exports = _exports;
})();
@@ -26,6 +26,12 @@
#btnCopy { padding: 6px 14px; background: #1a4a7a; color: #7ec8e3; border-radius: 6px; border: none; cursor: pointer; font-size: 0.85rem; white-space: nowrap; align-self: flex-end; }
#btnCopy:hover { background: #2a6aaa; }
#btnCopy.copied { background: #1a6a3a; color: #7effa0; }
#btnSaveDraft { background: #1a5a3a; color: #7effa0; }
#btnSaveDraft:hover { background: #2a7a4a; }
#btnLoadDraft { background: #3a3a1a; color: #ffe07e; }
#btnLoadDraft:hover { background: #5a5a2a; }
#btnDownload { background: #1a4a7a; color: #7ec8e3; }
#btnDownload:hover { background: #2a6aaa; }
#counter { font-size: 0.8rem; color: #888; padding-top: 6px; white-space: nowrap; }
.bufferRow { display: flex; align-items: center; gap: 8px; }
.bufferRow label { font-size: 0.85rem; color: #aaa; }
@@ -45,6 +51,8 @@
<div class="controls">
  <button id="btnUndo">↩ Undo</button>
  <button id="btnClear">✕ Clear</button>
  <button id="btnSaveDraft">💾 Save Draft</button>
  <button id="btnLoadDraft">📂 Load Draft</button>
</div>
<div class="bufferRow">
  <label for="bufferKm">Buffer km:</label>
@@ -63,16 +71,18 @@
<div style="display:flex;flex-direction:column;gap:8px;align-items:flex-end">
  <span id="counter">0 points</span>
  <button id="btnCopy">Copy</button>
  <button id="btnDownload">⬇ Download</button>
</div>
</div>

<!-- Instructions: paste the output into config.json as a top-level "geo_filter" key, then restart the server -->
<div id="help-bar">
  Copy the JSON above → paste as a top-level key in <code>config.json</code> → restart the server.
  <strong>Save Draft</strong> preserves your polygon across sessions. <strong>Download</strong> exports a JSON snippet → paste as a top-level key in <code>config.json</code> → restart the server.
  Nodes with no GPS fix always pass through. Remove the <code>geo_filter</code> block to disable filtering.
  · <a href="/geofilter-docs.html">Documentation</a>
</div>

<script src="geofilter-draft.js"></script>
<script>
const map = L.map('map').setView([50.5, 4.4], 8);
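// (Assumption: the default view centers roughly on Belgium; adjust the
// coordinates and zoom level to your own deployment area.)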

@@ -166,6 +176,40 @@ document.getElementById('btnCopy').addEventListener('click', function() {
    setTimeout(() => { btn.textContent = 'Copy'; btn.classList.remove('copied'); }, 2000);
  });
});

document.getElementById('btnSaveDraft').addEventListener('click', function() {
  if (points.length < 3) return;
  const bufferKm = parseFloat(document.getElementById('bufferKm').value) || 0;
  GeofilterDraft.saveDraft(points, bufferKm);
  const btn = document.getElementById('btnSaveDraft');
  btn.textContent = '✓ Saved';
  setTimeout(() => { btn.textContent = '💾 Save Draft'; }, 2000);
});

document.getElementById('btnLoadDraft').addEventListener('click', function() {
  const draft = GeofilterDraft.loadDraft();
  if (!draft || !draft.polygon || draft.polygon.length < 3) return;
  // Clear current
  markers.forEach(m => map.removeLayer(m));
  markers = [];
  points = draft.polygon.slice();
  if (draft.bufferKm != null) document.getElementById('bufferKm').value = draft.bufferKm;
  // Recreate markers
  points.forEach(function(pt, i) {
    const marker = L.circleMarker([pt[0], pt[1]], {
      radius: 6, color: '#4a9eff', weight: 2, fillColor: '#4a9eff', fillOpacity: 0.9
    }).addTo(map).bindTooltip(String(i + 1), { permanent: true, direction: 'top', offset: [0, -8], className: 'pt-label' });
    markers.push(marker);
  });
  render();
  map.fitBounds(L.polygon(points).getBounds().pad(0.2));
});

document.getElementById('btnDownload').addEventListener('click', function() {
  if (points.length < 3) return;
  const bufferKm = parseFloat(document.getElementById('bufferKm').value) || 0;
  GeofilterDraft.downloadConfig(points, bufferKm);
});
</script>
</body>
</html>

@@ -69,6 +69,16 @@
<p>Both the server and the ingestor read <code>geo_filter</code> from <code>config.json</code>. Restart both after changing this section.</p>
<p>To disable filtering entirely, remove the <code>geo_filter</code> block.</p>

<h2>Builder workflow: Save Draft, Load Draft, Download</h2>
<p>The <a href="/geofilter-builder.html">GeoFilter Builder</a> lets you draw a polygon on a map and produce the <code>geo_filter</code> snippet without hand-editing JSON. Three buttons drive the workflow:</p>
<ul>
  <li><strong>💾 Save Draft</strong> — writes the current polygon and <code>bufferKm</code> to your browser's <code>localStorage</code> under the key <code>geofilter-draft</code>. Drafts persist across page reloads and browser restarts, so you can iterate on a shape over multiple sessions.</li>
  <li><strong>📂 Load Draft</strong> — restores the most recently saved draft into the builder, replacing the current polygon. If no draft exists, the button is a no-op.</li>
  <li><strong>⬇ Download</strong> — exports the current polygon and <code>bufferKm</code> as <code>geofilter-config-snippet.json</code>, a single JSON object containing a top-level <code>geo_filter</code> block. Open the file, copy the <code>geo_filter</code> entry, and paste it into your <code>config.json</code> (see the example below).</li>
</ul>
<div class="note"><p>Drafts are stored locally in your browser only — they are not uploaded anywhere. Clearing site data or switching browsers will lose the draft. Use <strong>Download</strong> to keep a portable copy.</p></div>
<p>After pasting the snippet into <code>config.json</code>, restart the server and ingestor for the new filter to take effect.</p>
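<p>For reference, the downloaded snippet has this shape (the coordinates below are illustrative; yours come from the polygon you drew):</p>
<pre><code>{
  "geo_filter": {
    "bufferKm": 5,
    "polygon": [
      [50.85, 4.35],
      [50.85, 4.45],
      [50.75, 4.45],
      [50.75, 4.35]
    ]
  }
}</code></pre>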

<h2>Coordinate ordering</h2>
<div class="warn"><p><strong>Important:</strong> Coordinates are <code>[lat, lon]</code> — latitude first, longitude second. This is the opposite of GeoJSON, which uses <code>[lon, lat]</code>. Swapping them will place your polygon in the wrong location.</p></div>

@@ -0,0 +1,46 @@
// Geofilter draft save/load/download helpers.
// Exposes GeofilterDraft global with: saveDraft, loadDraft, clearDraft, buildConfigSnippet, downloadConfig
(function () {
  'use strict';
  var STORAGE_KEY = 'geofilter-draft';

  function saveDraft(polygon, bufferKm) {
    localStorage.setItem(STORAGE_KEY, JSON.stringify({ polygon: polygon, bufferKm: bufferKm }));
  }

  function loadDraft() {
    var raw = localStorage.getItem(STORAGE_KEY);
    if (!raw) return null;
    try { return JSON.parse(raw); } catch (e) { return null; }
  }

  function clearDraft() {
    localStorage.removeItem(STORAGE_KEY);
  }

  function buildConfigSnippet(polygon, bufferKm) {
    return JSON.stringify({ geo_filter: { bufferKm: bufferKm, polygon: polygon } }, null, 2);
  }

  function downloadConfig(polygon, bufferKm) {
    var snippet = buildConfigSnippet(polygon, bufferKm);
    var blob = new Blob([snippet], { type: 'application/json' });
    var url = URL.createObjectURL(blob);
    var a = document.createElement('a');
    a.href = url;
    a.download = 'geofilter-config-snippet.json';
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  }

  // Export. Under 'use strict' this IIFE's `this` is undefined, so fall back
  // to globalThis rather than `this` when window is absent.
  (typeof window !== 'undefined' ? window : globalThis).GeofilterDraft = {
    saveDraft: saveDraft,
    loadDraft: loadDraft,
    clearDraft: clearDraft,
    buildConfigSnippet: buildConfigSnippet,
    downloadConfig: downloadConfig
  };
})();
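// Usage sketch (illustrative coordinates; a triangle with a 2 km buffer):
//   GeofilterDraft.saveDraft([[50.9, 4.3], [50.6, 4.7], [50.5, 4.1]], 2);
//   GeofilterDraft.loadDraft();  // -> { polygon: [[50.9, 4.3], ...], bufferKm: 2 }
//   GeofilterDraft.buildConfigSnippet([[50.9, 4.3], [50.6, 4.7], [50.5, 4.1]], 2);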