Compare commits

..

4 Commits

Author SHA1 Message Date
KpaBap
095d50acc4 Merge branch 'master' into fix/remove-packets-v-fallbacks 2026-03-28 15:15:52 -07:00
KpaBap
aec178d41a Merge branch 'master' into fix/remove-packets-v-fallbacks 2026-03-28 15:14:50 -07:00
Kpa-clawbot
f3638a6a0c fix: address PR #220 review comments
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
2026-03-28 15:04:54 -07:00
Kpa-clawbot
b455e5a594 refactor: remove all packets_v SQL fallbacks — store handles all queries
Remove DB fallback paths from all route handlers. The in-memory
PacketStore now handles all packet/node/analytics queries. Handlers
return empty results or 404 when no store is available instead of
falling back to direct DB queries.

- Remove else-DB branches from handlePacketDetail, handleNodeHealth,
  handleNodeAnalytics, handleBulkHealth, handlePacketTimestamps, etc.
- Remove unused DB methods (GetPacketByHash, GetTransmissionByID,
  GetPacketByID, GetObservationsForHash, GetTimestamps, GetNodeHealth,
  GetNodeAnalytics, GetBulkHealth, etc.)
- Remove packets_v VIEW creation from schema
- Update tests for new behavior (no-store returns 404/empty, not 500)

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
2026-03-28 14:05:55 -07:00
5 changed files with 63 additions and 333 deletions

View File

@@ -26,14 +26,13 @@ type MQTTLegacy struct {
// Config holds the ingestor configuration, compatible with the Node.js config.json format.
type Config struct {
DBPath string `json:"dbPath"`
MQTT *MQTTLegacy `json:"mqtt,omitempty"`
MQTTSources []MQTTSource `json:"mqttSources,omitempty"`
LogLevel string `json:"logLevel,omitempty"`
ChannelKeysPath string `json:"channelKeysPath,omitempty"`
ChannelKeys map[string]string `json:"channelKeys,omitempty"`
HashChannels []string `json:"hashChannels,omitempty"`
Retention *RetentionConfig `json:"retention,omitempty"`
DBPath string `json:"dbPath"`
MQTT *MQTTLegacy `json:"mqtt,omitempty"`
MQTTSources []MQTTSource `json:"mqttSources,omitempty"`
LogLevel string `json:"logLevel,omitempty"`
ChannelKeysPath string `json:"channelKeysPath,omitempty"`
ChannelKeys map[string]string `json:"channelKeys,omitempty"`
Retention *RetentionConfig `json:"retention,omitempty"`
}
// RetentionConfig controls how long stale nodes are kept before being moved to inactive_nodes.

View File

@@ -512,64 +512,34 @@ func firstNonEmpty(vals ...string) string {
return ""
}
// deriveHashtagChannelKey derives an AES-128 key from a channel name.
// Same algorithm as Node.js: SHA-256(channelName) → first 32 hex chars (16 bytes).
func deriveHashtagChannelKey(channelName string) string {
h := sha256.Sum256([]byte(channelName))
return hex.EncodeToString(h[:16])
}
// loadChannelKeys loads channel decryption keys from config and/or a JSON file.
// Merge priority: rainbow (lowest) → derived from hashChannels → explicit config (highest).
// Priority: CHANNEL_KEYS_PATH env var > cfg.ChannelKeysPath > channel-rainbow.json next to config.
func loadChannelKeys(cfg *Config, configPath string) map[string]string {
keys := make(map[string]string)
// 1. Rainbow table keys (lowest priority)
// Determine file path for rainbow keys
keysPath := os.Getenv("CHANNEL_KEYS_PATH")
if keysPath == "" {
keysPath = cfg.ChannelKeysPath
}
if keysPath == "" {
// Default: look for channel-rainbow.json next to config file
keysPath = filepath.Join(filepath.Dir(configPath), "channel-rainbow.json")
}
rainbowCount := 0
if data, err := os.ReadFile(keysPath); err == nil {
var fileKeys map[string]string
if err := json.Unmarshal(data, &fileKeys); err == nil {
for k, v := range fileKeys {
keys[k] = v
}
rainbowCount = len(fileKeys)
log.Printf("Loaded %d channel keys from %s", rainbowCount, keysPath)
log.Printf("Loaded %d channel keys from %s", len(fileKeys), keysPath)
} else {
log.Printf("Warning: failed to parse channel keys file %s: %v", keysPath, err)
}
}
// 2. Derived keys from hashChannels (middle priority)
derivedCount := 0
for _, raw := range cfg.HashChannels {
trimmed := strings.TrimSpace(raw)
if trimmed == "" {
continue
}
channelName := trimmed
if !strings.HasPrefix(channelName, "#") {
channelName = "#" + channelName
}
// Skip if explicit config already has this key
if _, exists := cfg.ChannelKeys[channelName]; exists {
continue
}
keys[channelName] = deriveHashtagChannelKey(channelName)
derivedCount++
}
if derivedCount > 0 {
log.Printf("[channels] %d derived from hashChannels", derivedCount)
}
// 3. Explicit config keys (highest priority — overrides rainbow + derived)
// Merge inline config keys (override file keys)
for k, v := range cfg.ChannelKeys {
keys[k] = v
}

View File

@@ -3,8 +3,6 @@ package main
import (
"encoding/json"
"math"
"os"
"path/filepath"
"testing"
"time"
)
@@ -494,132 +492,3 @@ func TestAdvertRole(t *testing.T) {
})
}
}
func TestDeriveHashtagChannelKey(t *testing.T) {
// Test vectors validated against Node.js server-helpers.js
tests := []struct {
name string
want string
}{
{"#General", "649af2cab73ed5a890890a5485a0c004"},
{"#test", "9cd8fcf22a47333b591d96a2b848b73f"},
{"#MeshCore", "dcf73f393fa217f6b28fcec6ffc411ad"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := deriveHashtagChannelKey(tt.name)
if got != tt.want {
t.Errorf("deriveHashtagChannelKey(%q) = %q, want %q", tt.name, got, tt.want)
}
})
}
// Deterministic
k1 := deriveHashtagChannelKey("#foo")
k2 := deriveHashtagChannelKey("#foo")
if k1 != k2 {
t.Error("deriveHashtagChannelKey should be deterministic")
}
// Returns 32-char hex string (16 bytes)
if len(k1) != 32 {
t.Errorf("key length = %d, want 32", len(k1))
}
// Different inputs → different keys
k3 := deriveHashtagChannelKey("#bar")
if k1 == k3 {
t.Error("different inputs should produce different keys")
}
}
func TestLoadChannelKeysMergePriority(t *testing.T) {
dir := t.TempDir()
cfgPath := filepath.Join(dir, "config.json")
// Create a rainbow file with two keys: #rainbow (unique) and #override (to be overridden)
rainbowPath := filepath.Join(dir, "channel-rainbow.json")
t.Setenv("CHANNEL_KEYS_PATH", rainbowPath)
rainbow := map[string]string{
"#rainbow": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"#override": "rainbow_value_should_be_overridden",
}
rainbowJSON, err := json.Marshal(rainbow)
if err != nil {
t.Fatal(err)
}
if err := os.WriteFile(rainbowPath, rainbowJSON, 0o644); err != nil {
t.Fatal(err)
}
cfg := &Config{
HashChannels: []string{"General", "#override"},
ChannelKeys: map[string]string{"#override": "explicit_wins"},
}
keys := loadChannelKeys(cfg, cfgPath)
// Rainbow key loaded
if keys["#rainbow"] != "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" {
t.Errorf("rainbow key missing or wrong: %q", keys["#rainbow"])
}
// HashChannels derived #General
expected := deriveHashtagChannelKey("#General")
if keys["#General"] != expected {
t.Errorf("#General = %q, want %q (derived)", keys["#General"], expected)
}
// Explicit config wins over both rainbow and derived
if keys["#override"] != "explicit_wins" {
t.Errorf("#override = %q, want explicit_wins", keys["#override"])
}
}
func TestLoadChannelKeysHashChannelsNormalization(t *testing.T) {
t.Setenv("CHANNEL_KEYS_PATH", "")
dir := t.TempDir()
cfgPath := filepath.Join(dir, "config.json")
cfg := &Config{
HashChannels: []string{
"NoPound", // should become #NoPound
"#HasPound", // stays #HasPound
" Spaced ", // trimmed → #Spaced
"", // skipped
},
}
keys := loadChannelKeys(cfg, cfgPath)
if _, ok := keys["#NoPound"]; !ok {
t.Error("should derive key for #NoPound (auto-prefixed)")
}
if _, ok := keys["#HasPound"]; !ok {
t.Error("should derive key for #HasPound")
}
if _, ok := keys["#Spaced"]; !ok {
t.Error("should derive key for #Spaced (trimmed)")
}
if len(keys) != 3 {
t.Errorf("expected 3 keys, got %d", len(keys))
}
}
func TestLoadChannelKeysSkipExplicit(t *testing.T) {
t.Setenv("CHANNEL_KEYS_PATH", "")
dir := t.TempDir()
cfgPath := filepath.Join(dir, "config.json")
cfg := &Config{
HashChannels: []string{"General"},
ChannelKeys: map[string]string{"#General": "my_explicit_key"},
}
keys := loadChannelKeys(cfg, cfgPath)
// Explicit key should win — hashChannels derivation should be skipped
if keys["#General"] != "my_explicit_key" {
t.Errorf("#General = %q, want my_explicit_key", keys["#General"])
}
}

View File

@@ -62,7 +62,7 @@ type StoreObs struct {
type PacketStore struct {
mu sync.RWMutex
db *DB
packets []*StoreTx // sorted by first_seen ASC (oldest first; newest at tail)
packets []*StoreTx // sorted by first_seen DESC
byHash map[string]*StoreTx // hash → *StoreTx
byTxID map[int]*StoreTx // transmission_id → *StoreTx
byObsID map[int]*StoreObs // observation_id → *StoreObs
@@ -176,7 +176,7 @@ func (s *PacketStore) Load() error {
FROM transmissions t
LEFT JOIN observations o ON o.transmission_id = t.id
LEFT JOIN observers obs ON obs.rowid = o.observer_idx
ORDER BY t.first_seen ASC, o.timestamp DESC`
ORDER BY t.first_seen DESC, o.timestamp DESC`
} else {
loadSQL = `SELECT t.id, t.raw_hex, t.hash, t.first_seen, t.route_type,
t.payload_type, t.payload_version, t.decoded_json,
@@ -184,7 +184,7 @@ func (s *PacketStore) Load() error {
o.snr, o.rssi, o.score, o.path_json, o.timestamp
FROM transmissions t
LEFT JOIN observations o ON o.transmission_id = t.id
ORDER BY t.first_seen ASC, o.timestamp DESC`
ORDER BY t.first_seen DESC, o.timestamp DESC`
}
rows, err := s.db.conn.Query(loadSQL)
@@ -368,32 +368,28 @@ func (s *PacketStore) QueryPackets(q PacketQuery) *PacketResult {
results := s.filterPackets(q)
total := len(results)
// results is oldest-first (ASC). For DESC (default) read backwards from the tail;
// for ASC read forwards. Both are O(page_size) — no sort copy needed.
start := q.Offset
if start >= total {
return &PacketResult{Packets: []map[string]interface{}{}, Total: total}
}
pageSize := q.Limit
if start+pageSize > total {
pageSize = total - start
if q.Order == "ASC" {
sorted := make([]*StoreTx, len(results))
copy(sorted, results)
sort.Slice(sorted, func(i, j int) bool {
return sorted[i].FirstSeen < sorted[j].FirstSeen
})
results = sorted
}
packets := make([]map[string]interface{}, 0, pageSize)
if q.Order == "ASC" {
for _, tx := range results[start : start+pageSize] {
packets = append(packets, txToMap(tx))
}
} else {
// DESC: newest items are at the tail; page 0 = last pageSize items reversed
endIdx := total - start
startIdx := endIdx - pageSize
if startIdx < 0 {
startIdx = 0
}
for i := endIdx - 1; i >= startIdx; i-- {
packets = append(packets, txToMap(results[i]))
}
// Paginate
start := q.Offset
if start >= len(results) {
return &PacketResult{Packets: []map[string]interface{}{}, Total: total}
}
end := start + q.Limit
if end > len(results) {
end = len(results)
}
packets := make([]map[string]interface{}, 0, end-start)
for _, tx := range results[start:end] {
packets = append(packets, txToMap(tx))
}
return &PacketResult{Packets: packets, Total: total}
}
@@ -723,16 +719,15 @@ func (s *PacketStore) GetTimestamps(since string) []string {
s.mu.RLock()
defer s.mu.RUnlock()
// packets sorted oldest-first — scan from tail until we reach items older than since
// packets sorted newest first — scan from start until older than since
var result []string
for i := len(s.packets) - 1; i >= 0; i-- {
tx := s.packets[i]
for _, tx := range s.packets {
if tx.FirstSeen <= since {
break
}
result = append(result, tx.FirstSeen)
}
// result is currently newest-first; reverse to return ASC order
// Reverse to get ASC order
for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
result[i], result[j] = result[j], result[i]
}
@@ -782,30 +777,23 @@ func (s *PacketStore) QueryMultiNodePackets(pubkeys []string, limit, offset int,
total := len(filtered)
// filtered is oldest-first (built by iterating s.packets forward).
// Apply same DESC/ASC pagination logic as QueryPackets.
if order == "ASC" {
sort.Slice(filtered, func(i, j int) bool {
return filtered[i].FirstSeen < filtered[j].FirstSeen
})
}
if offset >= total {
return &PacketResult{Packets: []map[string]interface{}{}, Total: total}
}
pageSize := limit
if offset+pageSize > total {
pageSize = total - offset
end := offset + limit
if end > total {
end = total
}
packets := make([]map[string]interface{}, 0, pageSize)
if order == "ASC" {
for _, tx := range filtered[offset : offset+pageSize] {
packets = append(packets, txToMap(tx))
}
} else {
endIdx := total - offset
startIdx := endIdx - pageSize
if startIdx < 0 {
startIdx = 0
}
for i := endIdx - 1; i >= startIdx; i-- {
packets = append(packets, txToMap(filtered[i]))
}
packets := make([]map[string]interface{}, 0, end-offset)
for _, tx := range filtered[offset:end] {
packets = append(packets, txToMap(tx))
}
return &PacketResult{Packets: packets, Total: total}
}
@@ -938,14 +926,15 @@ func (s *PacketStore) IngestNewFromDB(sinceID, limit int) ([]map[string]interfac
DecodedJSON: r.decodedJSON,
}
s.byHash[r.hash] = tx
s.packets = append(s.packets, tx) // oldest-first; new items go to tail
// Prepend (newest first)
s.packets = append([]*StoreTx{tx}, s.packets...)
s.byTxID[r.txID] = tx
s.indexByNode(tx)
if tx.PayloadType != nil {
pt := *tx.PayloadType
// Append to maintain oldest-first order (matches Load ordering)
// Prepend to maintain newest-first order (matches Load ordering)
// so GetChannelMessages reverse iteration stays correct
s.byPayloadType[pt] = append(s.byPayloadType[pt], tx)
s.byPayloadType[pt] = append([]*StoreTx{tx}, s.byPayloadType[pt]...)
}
if _, exists := broadcastTxs[r.txID]; !exists {
@@ -1090,6 +1079,8 @@ func (s *PacketStore) IngestNewFromDB(sinceID, limit int) ([]map[string]interfac
s.cacheMu.Unlock()
}
log.Printf("[poller] IngestNewFromDB: found %d new txs, maxID %d->%d", len(result), sinceID, newMaxID)
return result, newMaxID
}
@@ -1272,7 +1263,8 @@ func (s *PacketStore) IngestNewObservations(sinceObsID, limit int) int {
s.subpathCache = make(map[string]*cachedResult)
s.cacheMu.Unlock()
// analytics caches cleared; no per-cycle log to avoid stdout overhead
log.Printf("[poller] IngestNewObservations: updated %d existing txs, maxObsID %d->%d",
len(updatedTxs), sinceObsID, newMaxObsID)
}
return newMaxObsID
@@ -1896,7 +1888,7 @@ func (s *PacketStore) GetChannelMessages(channelHash string, limit, offset int)
msgMap := map[string]*msgEntry{}
var msgOrder []string
// Iterate type-5 packets oldest-first (byPayloadType is ASC = oldest first)
// Iterate type-5 packets oldest-first (byPayloadType is in load order = newest first)
type decodedMsg struct {
Type string `json:"type"`
Channel string `json:"channel"`
@@ -1907,7 +1899,8 @@ func (s *PacketStore) GetChannelMessages(channelHash string, limit, offset int)
}
grpTxts := s.byPayloadType[5]
for _, tx := range grpTxts {
for i := len(grpTxts) - 1; i >= 0; i-- {
tx := grpTxts[i]
if tx.DecodedJSON == "" {
continue
}
@@ -4076,13 +4069,13 @@ func (s *PacketStore) GetNodeHealth(pubkey string) (map[string]interface{}, erro
lhVal = lastHeard
}
// Recent packets (up to 20, newest first — read from tail of oldest-first slice)
// Recent packets (up to 20, newest first — packets are already sorted DESC)
recentLimit := 20
if len(packets) < recentLimit {
recentLimit = len(packets)
}
recentPackets := make([]map[string]interface{}, 0, recentLimit)
for i := len(packets) - 1; i >= len(packets)-recentLimit; i-- {
for i := 0; i < recentLimit; i++ {
p := txToMap(packets[i])
delete(p, "observations")
recentPackets = append(recentPackets, p)

View File

@@ -1,101 +0,0 @@
# CoreScope Migration Guide
MeshCore Analyzer has been renamed to **CoreScope**. This document covers what you need to update.
## What Changed
- **Repository name**: `meshcore-analyzer` → `corescope`
- **Docker image name**: `meshcore-analyzer:latest` → `corescope:latest`
- **Docker container prefixes**: `meshcore-*` → `corescope-*`
- **Default site name**: "MeshCore Analyzer" → "CoreScope"
## What Did NOT Change
- **Data directories** — `~/meshcore-data/` stays as-is
- **Database filename** — `meshcore.db` is unchanged
- **MQTT topics** — `meshcore/#` topics are protocol-level and unchanged
- **Browser state** — Favorites, localStorage keys, and settings are preserved
- **Config file format** — `config.json` structure is the same
---
## 1. Git Remote Update
Update your local clone to point to the new repository URL:
```bash
git remote set-url origin https://github.com/Kpa-clawbot/corescope.git
git pull
```
## 2. Docker (manage.sh) Users
Rebuild with the new image name:
```bash
./manage.sh stop
git pull
./manage.sh setup
```
The new image is `corescope:latest`. You can clean up the old image:
```bash
docker rmi meshcore-analyzer:latest
```
## 3. Docker Compose Users
Rebuild containers with the new names:
```bash
docker compose down
git pull
docker compose build
docker compose up -d
```
Container names change from `meshcore-*` to `corescope-*`. Old containers are removed by `docker compose down`.
## 4. Data Directories
**No action required.** The data directory `~/meshcore-data/` and database file `meshcore.db` are unchanged. Your existing data carries over automatically.
## 5. Config
If you customized `branding.siteName` in your `config.json`, update it to your preferred name. Otherwise the new default "CoreScope" applies automatically.
No other config keys changed.
## 6. MQTT
**No action required.** MQTT topics (`meshcore/#`) are protocol-level and are not affected by the rename.
## 7. Browser
**No action required.** Bookmarks/favorites will continue to work at the same host and port. localStorage keys are unchanged, so your settings and preferences are preserved.
## 8. CI/CD
If you have custom CI/CD pipelines that reference:
- The old repository URL (`meshcore-analyzer`)
- The old Docker image name (`meshcore-analyzer:latest`)
- Old container names (`meshcore-*`)
Update those references to use the new names.
---
## Summary Checklist
| Item | Action Required? | What to Do |
|------|-----------------|------------|
| Git remote | ✅ Yes | `git remote set-url origin …corescope.git` |
| Docker image | ✅ Yes | Rebuild; optionally `docker rmi` old image |
| Docker Compose | ✅ Yes | `docker compose down && build && up` |
| Data directories | ❌ No | Unchanged |
| Config | ⚠️ Maybe | Only if you customized `branding.siteName` |
| MQTT | ❌ No | Topics unchanged |
| Browser | ❌ No | Settings preserved |
| CI/CD | ⚠️ Maybe | Update if referencing old repo/image names |