mirror of
https://github.com/Kpa-clawbot/meshcore-analyzer.git
synced 2026-05-12 12:54:44 +00:00
a8e1cea683
## Problem The firmware computes packet content hash as: ``` SHA256(payload_type_byte + [path_len for TRACE] + payload) ``` Where `payload_type_byte = (header >> 2) & 0x0F` — just the payload type bits (2-5). CoreScope was using the **full header byte** in its hash computation, which includes route type bits (0-1) and version bits (6-7). This meant the same logical packet produced different content hashes depending on route type — breaking dedup and packet lookup. **Firmware reference:** `Packet.cpp::calculatePacketHash()` uses `getPayloadType()` which returns `(header >> PH_TYPE_SHIFT) & PH_TYPE_MASK`. ## Fix - Extract only payload type bits: `payloadType := (headerByte >> 2) & 0x0F` - Include `path_len` byte in hash for TRACE packets (matching firmware behavior) - Applied to both `cmd/server/decoder.go` and `cmd/ingestor/decoder.go` ## Tests Added - **Route type independence:** Same payload with FLOOD vs DIRECT route types produces identical hash - **TRACE path_len inclusion:** TRACE packets with different `path_len` produce different hashes - **Firmware compatibility:** Hash output matches manual computation of firmware algorithm ## Migration Impact Existing packets in the DB have content hashes computed with the old (incorrect) formula. Options: 1. **Recompute hashes** via migration (recommended for clean state) 2. **Dual lookup** — check both old and new hash on queries (backward compat) 3. **Accept the break** — old hashes become stale, new packets get correct hashes Recommend option 1 (migration) as a follow-up. The volume of affected packets depends on how many distinct route types were seen for the same logical packet. Fixes #786 --------- Co-authored-by: you <you@example.com>
137 lines
3.0 KiB
Go
137 lines
3.0 KiB
Go
// migrate-fixture-hashes recomputes content hashes in a fixture DB using the
|
|
// current ComputeContentHash formula. Run once; idempotent.
|
|
package main
|
|
|
|
import (
|
|
"crypto/sha256"
|
|
"database/sql"
|
|
"encoding/hex"
|
|
"fmt"
|
|
"log"
|
|
"os"
|
|
|
|
_ "modernc.org/sqlite"
|
|
)
|
|
|
|
// computeContentHash derives the 16-hex-char content hash for a raw packet,
// mirroring the firmware formula:
//
//	SHA256(payload_type_byte + [path_byte + 0x00 for TRACE] + payload)
//
// where payload_type_byte is only bits 2-5 of the header, so route-type and
// version bits never influence the hash. Packets that cannot be parsed fall
// back to a prefix of the raw hex string so callers still get a stable key.
func computeContentHash(rawHex string) string {
	// Fallback key for undecodable or truncated packets.
	fallback := func() string {
		if len(rawHex) >= 16 {
			return rawHex[:16]
		}
		return rawHex
	}

	raw, err := hex.DecodeString(rawHex)
	if err != nil || len(raw) < 2 {
		return fallback()
	}

	header := raw[0]
	pos := 1
	// Transport route types (2 and 3) carry 4 extra bytes before the path byte.
	if rt := header & 0x03; rt == 2 || rt == 3 {
		pos += 4
	}
	if pos >= len(raw) {
		return fallback()
	}

	pathByte := raw[pos]
	pos++
	// The path byte packs hash size (top 2 bits, 1-4) and hash count (low 6 bits);
	// their product is the number of path bytes to skip before the payload.
	span := (int((pathByte>>6)&0x3) + 1) * int(pathByte&0x3F)
	if pos+span > len(raw) {
		return fallback()
	}

	ptype := (header >> 2) & 0x0F
	msg := make([]byte, 0, 3+len(raw))
	msg = append(msg, ptype)
	// TRACE (payload type 7) additionally folds the path byte into the hash.
	if ptype == 7 {
		msg = append(msg, pathByte, 0x00)
	}
	msg = append(msg, raw[pos+span:]...)

	digest := sha256.Sum256(msg)
	return hex.EncodeToString(digest[:])[:16]
}
|
|
|
|
func main() {
|
|
if len(os.Args) < 2 {
|
|
fmt.Fprintf(os.Stderr, "usage: %s <db-path>\n", os.Args[0])
|
|
os.Exit(1)
|
|
}
|
|
dbPath := os.Args[1]
|
|
|
|
db, err := sql.Open("sqlite", dbPath)
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
defer db.Close()
|
|
|
|
rows, err := db.Query("SELECT id, raw_hex, hash FROM transmissions")
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
|
|
type update struct {
|
|
id int
|
|
newHash string
|
|
}
|
|
var updates []update
|
|
|
|
for rows.Next() {
|
|
var id int
|
|
var rawHex, oldHash string
|
|
if err := rows.Scan(&id, &rawHex, &oldHash); err != nil {
|
|
log.Printf("scan: %v", err)
|
|
continue
|
|
}
|
|
newHash := computeContentHash(rawHex)
|
|
if newHash != oldHash {
|
|
updates = append(updates, update{id, newHash})
|
|
}
|
|
}
|
|
rows.Close()
|
|
|
|
if len(updates) == 0 {
|
|
fmt.Println("All hashes already match current formula.")
|
|
return
|
|
}
|
|
|
|
tx, err := db.Begin()
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
stmt, err := tx.Prepare("UPDATE transmissions SET hash = ? WHERE id = ?")
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
merged := 0
|
|
for _, u := range updates {
|
|
if _, err := stmt.Exec(u.newHash, u.id); err != nil {
|
|
// UNIQUE constraint = duplicate (same content, different old hash).
|
|
// Move observations to the surviving tx, then delete the dup.
|
|
log.Printf("update id %d: %v — merging duplicate", u.id, err)
|
|
// Find surviving tx id
|
|
var survID int
|
|
if err2 := tx.QueryRow("SELECT id FROM transmissions WHERE hash = ?", u.newHash).Scan(&survID); err2 == nil {
|
|
tx.Exec("UPDATE observations SET transmission_id = ? WHERE transmission_id = ?", survID, u.id)
|
|
tx.Exec("DELETE FROM transmissions WHERE id = ?", u.id)
|
|
merged++
|
|
}
|
|
}
|
|
}
|
|
stmt.Close()
|
|
if err := tx.Commit(); err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
fmt.Printf("Migrated %d hashes, merged %d duplicates.\n", len(updates)-merged, merged)
|
|
}
|