fix: use latest advert for node hash size instead of historical mode (#341)

## Summary

Fixes #303 — Repeater hash stats now reflect the **latest advert**
instead of the historical mode (most frequent).

When a node is reconfigured (e.g. from 1-byte to 2-byte hash size), the
analytics and node detail pages now show the updated value immediately
after the next advert is received.

## Changes

### cmd/server/store.go

1. **computeNodeHashSizeInfo** — Changed hash size determination from
statistical mode to latest advert. The most recent advert in
chronological order now determines hash_size. The hash_sizes_seen and
hash_size_inconsistent tracking is preserved for multi-byte analytics.

2. **computeAnalyticsHashSizes** — Two fixes:
- **byNode keyed by pubKey** instead of name, so same-name nodes with
different public keys are counted separately in distributionByRepeaters.
- **Zero-hop adverts included** — advert originator tracking now happens
before the hops check, so zero-hop adverts contribute to per-node stats.

### cmd/server/routes_test.go

Added 4 new tests:
- TestGetNodeHashSizeInfoLatestWins — 4 historical 1-byte adverts + 1
recent 2-byte advert → hash size should be 2 (not 1 from mode)
- TestGetNodeHashSizeInfoNoAdverts — node with no ADVERT packets →
graceful nil, no crash
- TestHashAnalyticsZeroHopAdvert — zero-hop advert contributes to
per-node stats without inflating the relayed-packet totals
- TestAnalyticsHashSizeSameNameDifferentPubkey — two nodes named
"SameName" with different pubkeys → counted as 2 separate entries
- Updated TestGetNodeHashSizeInfoDominant comment to reflect new
behavior

## Context

Community report from contributor @kizniche: after reconfiguring a
repeater from 1-byte to 2-byte hash and sending a flood advert, the
analytics page still showed 1-byte. Root cause was the mode-based
computation which required many new adverts to shift the majority. The
upstream firmware bug causing stale path bytes
(meshcore-dev/MeshCore#2154) has been fixed, making the latest advert
reliable.

## Testing

- `go vet ./...` — clean
- `go test ./... -count=1` — all tests pass (including 4 new ones)
- `cmd/ingestor` tests — pass

---------

Co-authored-by: Kpa-clawbot <259247574+Kpa-clawbot@users.noreply.github.com>
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
Kpa-clawbot
2026-03-31 22:26:32 -07:00
committed by GitHub
parent 81bf3b4b12
commit 4cdc554b40
2 changed files with 239 additions and 64 deletions

View File

@@ -2086,8 +2086,8 @@ t.Error("expected inconsistent flag to be true for flip-flop pattern")
}
func TestGetNodeHashSizeInfoDominant(t *testing.T) {
// A node that sends mostly 2-byte adverts but occasionally 1-byte (pathByte=0x00
// on direct sends) should report HashSize=2, not 1.
// A node with mostly 2-byte adverts and an occasional 1-byte advert; the
// latest advert (2-byte) determines the reported hash size.
db := setupTestDB(t)
seedTestData(t, db)
store := NewPacketStore(db, nil)
@@ -2103,7 +2103,7 @@ raw1byte := "04" + "00" + "aabb" // pathByte=0x00 → hashSize=1 (direct send, n
raw2byte := "04" + "40" + "aabb" // pathByte=0x40 → hashSize=2
payloadType := 4
// 1 packet with hashSize=1, 4 packets with hashSize=2
// 1 packet with hashSize=1, 4 packets with hashSize=2 (latest is 2-byte)
raws := []string{raw1byte, raw2byte, raw2byte, raw2byte, raw2byte}
for i, raw := range raws {
tx := &StoreTx{
@@ -2124,10 +2124,194 @@ if ni == nil {
t.Fatal("expected hash info for test node")
}
if ni.HashSize != 2 {
t.Errorf("HashSize=%d, want 2 (dominant size should win over occasional 1-byte)", ni.HashSize)
t.Errorf("HashSize=%d, want 2 (latest advert should determine hash size)", ni.HashSize)
}
}
func TestGetNodeHashSizeInfoLatestWins(t *testing.T) {
	// Regression test for issue #303: a node reconfigured from a 1-byte to a
	// 2-byte hash must report hash size 2 as soon as the newest advert
	// arrives, no matter how many older 1-byte adverts it has accumulated.
	db := setupTestDB(t)
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load failed: %v", err)
	}
	pk := "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
	db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'LatestWins', 'repeater')", pk)
	decoded := `{"name":"LatestWins","pubKey":"` + pk + `"}`
	oneByteRaw := "04" + "00" + "aabb" // pathByte=0x00 → hashSize=1
	twoByteRaw := "04" + "40" + "aabb" // pathByte=0x40 → hashSize=2
	payloadType := 4
	// Four older 1-byte adverts followed by one newer 2-byte advert.
	// A mode (majority) computation would pick 1; latest-wins must pick 2.
	adverts := []string{oneByteRaw, oneByteRaw, oneByteRaw, oneByteRaw, twoByteRaw}
	for idx, rawHex := range adverts {
		pkt := &StoreTx{
			ID:          7000 + idx,
			RawHex:      rawHex,
			Hash:        "latest" + strconv.Itoa(idx),
			FirstSeen:   "2024-01-01T0" + strconv.Itoa(idx) + ":00:00Z",
			PayloadType: &payloadType,
			DecodedJSON: decoded,
		}
		store.packets = append(store.packets, pkt)
		store.byPayloadType[4] = append(store.byPayloadType[4], pkt)
	}
	info := store.GetNodeHashSizeInfo()
	ni := info[pk]
	if ni == nil {
		t.Fatal("expected hash info for test node")
	}
	if ni.HashSize != 2 {
		t.Errorf("HashSize=%d, want 2 (latest advert should win over historical mode)", ni.HashSize)
	}
	if len(ni.AllSizes) != 2 {
		t.Errorf("AllSizes count=%d, want 2", len(ni.AllSizes))
	}
	if !ni.AllSizes[1] || !ni.AllSizes[2] {
		t.Error("AllSizes should contain both 1 and 2")
	}
}
func TestGetNodeHashSizeInfoNoAdverts(t *testing.T) {
	// A node with no ADVERT packets should not appear in hash size info:
	// the map lookup must yield nil gracefully rather than crash.
	db := setupTestDB(t)
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load failed: %v", err)
	}
	// Full 64-hex-char (32-byte) public key, consistent with the other
	// hash-size tests in this file (the previous literal was only 60 chars).
	pk := "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
	db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'NoAdverts', 'repeater')", pk)
	// Add a non-advert packet (payload_type=2 = TXT_MSG); only ADVERT
	// packets (payload_type=4) feed hash-size observations.
	payloadType := 2
	tx := &StoreTx{
		ID:          6000,
		RawHex:      "0440aabb",
		Hash:        "noadverts0",
		FirstSeen:   "2024-01-01T00:00:00Z",
		PayloadType: &payloadType,
		DecodedJSON: `{"pubKey":"` + pk + `"}`,
	}
	store.packets = append(store.packets, tx)
	store.byPayloadType[2] = append(store.byPayloadType[2], tx)
	info := store.GetNodeHashSizeInfo()
	if ni := info[pk]; ni != nil {
		t.Errorf("expected nil hash info for node with no adverts, got HashSize=%d", ni.HashSize)
	}
}
func TestHashAnalyticsZeroHopAdvert(t *testing.T) {
	// A zero-hop advert (pathByte=0x00, no relay path) must be tracked in
	// distributionByRepeaters (per-node stats) while leaving total and
	// distribution untouched — those count relayed packets only.
	db := setupTestDB(t)
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load failed: %v", err)
	}
	// Snapshot the seed-data values first; calling computeAnalyticsHashSizes
	// directly bypasses the cache, giving a known baseline for the checks.
	baseline := store.computeAnalyticsHashSizes("")
	baseTotal, _ := baseline["total"].(int)
	baseDist, _ := baseline["distribution"].(map[string]int)
	baseDist1 := baseDist["1"]
	pk := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
	db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'ZeroHop', 'repeater')", pk)
	decoded := `{"name":"ZeroHop","pubKey":"` + pk + `"}`
	payloadType := 4
	// header 0x05 → routeType=1 (FLOOD), pathByte=0x00 → hashSize=1
	advert := &StoreTx{
		ID:          8000,
		RawHex:      "05" + "00" + "aabb",
		Hash:        "zerohop0",
		FirstSeen:   "2024-01-01T00:00:00Z",
		PayloadType: &payloadType,
		DecodedJSON: decoded,
		// PathJSON left empty → txGetParsedPath returns nil (zero hops)
	}
	store.packets = append(store.packets, advert)
	store.byPayloadType[4] = append(store.byPayloadType[4], advert)
	result := store.computeAnalyticsHashSizes("")
	// The zero-hop advert's node must show up in per-repeater tracking.
	distByRepeaters, ok := result["distributionByRepeaters"].(map[string]int)
	if !ok {
		t.Fatal("distributionByRepeaters missing or wrong type")
	}
	if distByRepeaters["1"] < 1 {
		t.Errorf("distributionByRepeaters[\"1\"]=%d, want >=1 (zero-hop advert should be tracked per-node)", distByRepeaters["1"])
	}
	// Relayed-only aggregates must match the baseline exactly.
	total, _ := result["total"].(int)
	dist, _ := result["distribution"].(map[string]int)
	if total != baseTotal {
		t.Errorf("total=%d, want %d (zero-hop adverts must not inflate total)", total, baseTotal)
	}
	if dist["1"] != baseDist1 {
		t.Errorf("distribution[\"1\"]=%d, want %d (zero-hop adverts must not inflate distribution)", dist["1"], baseDist1)
	}
}
func TestAnalyticsHashSizeSameNameDifferentPubkey(t *testing.T) {
	// Issue #303 (byNode keying fix): two repeaters sharing the display name
	// "SameName" but holding distinct public keys must appear as two
	// separate entries in distributionByRepeaters.
	db := setupTestDB(t)
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	if err := store.Load(); err != nil {
		t.Fatalf("store.Load failed: %v", err)
	}
	pk1 := "aaaa111122223333444455556666777788889999aaaabbbbccccddddeeee1111"
	pk2 := "aaaa111122223333444455556666777788889999aaaabbbbccccddddeeee2222"
	floodRaw := "05" + "40" + "aabb" // header routeType=1 (FLOOD), pathByte=0x40 → hashSize=2
	payloadType := 4
	payloads := []string{
		`{"name":"SameName","pubKey":"` + pk1 + `"}`,
		`{"name":"SameName","pubKey":"` + pk2 + `"}`,
	}
	for n, body := range payloads {
		pkt := &StoreTx{
			ID:          6100 + n,
			RawHex:      floodRaw,
			Hash:        "samename" + strconv.Itoa(n),
			FirstSeen:   "2024-01-01T00:00:00Z",
			PayloadType: &payloadType,
			DecodedJSON: body,
			PathJSON:    `["AABB"]`,
		}
		store.packets = append(store.packets, pkt)
		store.byPayloadType[4] = append(store.byPayloadType[4], pkt)
	}
	result := store.GetAnalyticsHashSizes("")
	distByRepeaters, ok := result["distributionByRepeaters"].(map[string]int)
	if !ok {
		t.Fatal("distributionByRepeaters missing or wrong type")
	}
	if distByRepeaters["2"] < 2 {
		t.Errorf("distributionByRepeaters[\"2\"]=%d, want >=2 (same-name nodes with different pubkeys should be counted separately)", distByRepeaters["2"])
	}
}
func TestAnalyticsHashSizesNoNullArrays(t *testing.T) {
_, router := setupTestServer(t)
req := httptest.NewRequest("GET", "/api/analytics/hash-sizes", nil)

View File

@@ -3837,10 +3837,6 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
if tx.RawHex == "" {
continue
}
hops := txGetParsedPath(tx)
if len(hops) == 0 {
continue
}
if regionObs != nil {
match := false
for _, obs := range tx.Observations {
@@ -3881,6 +3877,48 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
if hashSize > 3 {
continue
}
// Track originator from advert packets (including zero-hop adverts,
// keyed by pubKey so same-name nodes don't merge).
if tx.PayloadType != nil && *tx.PayloadType == 4 && tx.DecodedJSON != "" {
var d map[string]interface{}
if json.Unmarshal([]byte(tx.DecodedJSON), &d) == nil {
pk := ""
if v, ok := d["pubKey"].(string); ok {
pk = v
} else if v, ok := d["public_key"].(string); ok {
pk = v
}
if pk != "" {
name := ""
if n, ok := d["name"].(string); ok {
name = n
}
if name == "" {
if len(pk) >= 8 {
name = pk[:8]
} else {
name = pk
}
}
if byNode[pk] == nil {
byNode[pk] = map[string]interface{}{
"hashSize": hashSize, "packets": 0,
"lastSeen": tx.FirstSeen, "name": name,
}
}
byNode[pk]["packets"] = byNode[pk]["packets"].(int) + 1
byNode[pk]["hashSize"] = hashSize
byNode[pk]["lastSeen"] = tx.FirstSeen
}
}
}
// Distribution/hourly/uniqueHops only for packets with relay hops
hops := txGetParsedPath(tx)
if len(hops) == 0 {
continue
}
total++
sizeKey := strconv.Itoa(hashSize)
@@ -3912,41 +3950,6 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
}
uniqueHops[hop]["count"] = uniqueHops[hop]["count"].(int) + 1
}
// Track originator from advert packets
if tx.PayloadType != nil && *tx.PayloadType == 4 && tx.DecodedJSON != "" {
var d map[string]interface{}
if json.Unmarshal([]byte(tx.DecodedJSON), &d) == nil {
name := ""
if n, ok := d["name"].(string); ok {
name = n
}
if name == "" {
if pk, ok := d["pubKey"].(string); ok && pk != "" {
name = pk[:8]
} else if pk, ok := d["public_key"].(string); ok && pk != "" {
name = pk[:8]
}
}
if name != "" {
if byNode[name] == nil {
var pubkey interface{}
if pk, ok := d["pubKey"].(string); ok {
pubkey = pk
} else if pk, ok := d["public_key"].(string); ok {
pubkey = pk
}
byNode[name] = map[string]interface{}{
"hashSize": hashSize, "packets": 0,
"lastSeen": tx.FirstSeen, "pubkey": pubkey,
}
}
byNode[name]["packets"] = byNode[name]["packets"].(int) + 1
byNode[name]["hashSize"] = hashSize
byNode[name]["lastSeen"] = tx.FirstSeen
}
}
}
}
// Sort hourly data
@@ -3988,12 +3991,12 @@ func (s *PacketStore) computeAnalyticsHashSizes(region string) map[string]interf
// Multi-byte nodes
multiByteNodes := make([]map[string]interface{}, 0)
for name, data := range byNode {
for pk, data := range byNode {
if data["hashSize"].(int) > 1 {
multiByteNodes = append(multiByteNodes, map[string]interface{}{
"name": name, "hashSize": data["hashSize"],
"name": data["name"], "hashSize": data["hashSize"],
"packets": data["packets"], "lastSeen": data["lastSeen"],
"pubkey": data["pubkey"],
"pubkey": pk,
})
}
}
@@ -4089,25 +4092,13 @@ func (s *PacketStore) computeNodeHashSizeInfo() map[string]*hashSizeNodeInfo {
ni.Seq = append(ni.Seq, hs)
}
// Post-process: compute dominant hash size (mode) and flip-flop flag.
// Using the last-seen value would misreport nodes that occasionally send
// with pathByte=0x00 (hashSize=1) when transmitting directly with no
// relay hops, even though their true hash size is 2 or 3.
// Post-process: use latest advert hash size and compute flip-flop flag.
// The most recent advert reflects the node's current hash size
// configuration. The upstream firmware bug causing stale path bytes in
// flood adverts was fixed (meshcore-dev/MeshCore#2154).
for _, ni := range info {
// Dominant hash size: pick the most frequently observed size.
// On a tie, prefer the larger value (more specific).
counts := make(map[int]int, len(ni.AllSizes))
for _, hs := range ni.Seq {
counts[hs]++
}
best, bestCount := 1, 0
for hs, cnt := range counts {
if cnt > bestCount || (cnt == bestCount && hs > best) {
best = hs
bestCount = cnt
}
}
ni.HashSize = best
// Use the most recent advert's hash size (last in chronological order).
ni.HashSize = ni.Seq[len(ni.Seq)-1]
// Flip-flop (inconsistent) flag: need >= 3 observations,
// >= 2 unique sizes, and >= 2 transitions in the sequence.