Mirror of https://github.com/Kpa-clawbot/meshcore-analyzer.git (synced 2026-05-15 11:25:07 +00:00)

Compare commits

8 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a1a67e89fb |  |
|  | 91fcbc5adc |  |
|  | 380b1b1e28 |  |
|  | 03cfd114da |  |
|  | df90de77a7 |  |
|  | 7b97c532a1 |  |
|  | e0c2d37041 |  |
|  | f5d0ce066b |  |
+33 -6
@@ -1,17 +1,44 @@
 # MeshCore Analyzer — Environment Configuration
-# Copy to .env and customize. All values have sensible defaults in docker-compose.yml.
+# Copy to .env and customize. All values have sensible defaults.
+#
+# This file is read by BOTH docker compose AND manage.sh — one source of truth.
+# Each environment keeps config + data together in one directory:
+#   ~/meshcore-data/config.json, meshcore.db, Caddyfile, theme.json
+#   ~/meshcore-staging-data/config.json, meshcore.db, Caddyfile
 
 # --- Production ---
-PROD_HTTP_PORT=80
-PROD_HTTPS_PORT=443
-PROD_MQTT_PORT=1883
+# Data directory (database, theme, etc.)
+# Default: ~/meshcore-data
+# Used by: docker compose, manage.sh
+PROD_DATA_DIR=~/meshcore-data
+
+# HTTP port for web UI
+# Default: 80
+# Used by: docker compose
+PROD_HTTP_PORT=80
+
+# HTTPS port for web UI (TLS via Caddy)
+# Default: 443
+# Used by: docker compose
+PROD_HTTPS_PORT=443
+
+# MQTT port for observer connections
+# Default: 1883
+# Used by: docker compose
+PROD_MQTT_PORT=1883
 
 # --- Staging (HTTP only, no HTTPS) ---
-STAGING_HTTP_PORT=81
-STAGING_MQTT_PORT=1884
+# Data directory
+# Default: ~/meshcore-staging-data
+# Used by: docker compose
+STAGING_DATA_DIR=~/meshcore-staging-data
+
+# HTTP port
+# Default: 81
+# Used by: docker compose
+STAGING_HTTP_PORT=81
+
+# MQTT port
+# Default: 1884
+# Used by: docker compose
+STAGING_MQTT_PORT=1884
 
@@ -26,13 +26,14 @@ type MQTTLegacy struct {
 
 // Config holds the ingestor configuration, compatible with the Node.js config.json format.
 type Config struct {
-    DBPath          string            `json:"dbPath"`
-    MQTT            *MQTTLegacy       `json:"mqtt,omitempty"`
-    MQTTSources     []MQTTSource      `json:"mqttSources,omitempty"`
-    LogLevel        string            `json:"logLevel,omitempty"`
-    ChannelKeysPath string            `json:"channelKeysPath,omitempty"`
-    ChannelKeys     map[string]string `json:"channelKeys,omitempty"`
-    Retention       *RetentionConfig  `json:"retention,omitempty"`
+    DBPath          string            `json:"dbPath"`
+    MQTT            *MQTTLegacy       `json:"mqtt,omitempty"`
+    MQTTSources     []MQTTSource      `json:"mqttSources,omitempty"`
+    LogLevel        string            `json:"logLevel,omitempty"`
+    ChannelKeysPath string            `json:"channelKeysPath,omitempty"`
+    ChannelKeys     map[string]string `json:"channelKeys,omitempty"`
+    HashChannels    []string          `json:"hashChannels,omitempty"`
+    Retention       *RetentionConfig  `json:"retention,omitempty"`
 }
 
 // RetentionConfig controls how long stale nodes are kept before being moved to inactive_nodes.
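With the new `hashChannels` field, a minimal `config.json` might look like this (a sketch assembled from the struct tags above; the path and key values are placeholders):

```json
{
  "dbPath": "./data/meshcore.db",
  "logLevel": "info",
  "hashChannels": ["General", "#test"],
  "channelKeys": {
    "#Private": "00112233445566778899aabbccddeeff"
  }
}
```

Channels listed in `hashChannels` get their keys derived automatically; explicit `channelKeys` entries always win, as the merge logic below spells out.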
+35 -5
@@ -512,34 +512,64 @@ func firstNonEmpty(vals ...string) string {
     return ""
 }
 
+// deriveHashtagChannelKey derives an AES-128 key from a channel name.
+// Same algorithm as Node.js: SHA-256(channelName) → first 32 hex chars (16 bytes).
+func deriveHashtagChannelKey(channelName string) string {
+    h := sha256.Sum256([]byte(channelName))
+    return hex.EncodeToString(h[:16])
+}
+
 // loadChannelKeys loads channel decryption keys from config and/or a JSON file.
 // Priority: CHANNEL_KEYS_PATH env var > cfg.ChannelKeysPath > channel-rainbow.json next to config.
+// Merge priority: rainbow (lowest) → derived from hashChannels → explicit config (highest).
 func loadChannelKeys(cfg *Config, configPath string) map[string]string {
     keys := make(map[string]string)
 
-    // Determine file path for rainbow keys
+    // 1. Rainbow table keys (lowest priority)
     keysPath := os.Getenv("CHANNEL_KEYS_PATH")
     if keysPath == "" {
         keysPath = cfg.ChannelKeysPath
     }
     if keysPath == "" {
         // Default: look for channel-rainbow.json next to config file
         keysPath = filepath.Join(filepath.Dir(configPath), "channel-rainbow.json")
     }
 
+    rainbowCount := 0
     if data, err := os.ReadFile(keysPath); err == nil {
         var fileKeys map[string]string
         if err := json.Unmarshal(data, &fileKeys); err == nil {
             for k, v := range fileKeys {
                 keys[k] = v
             }
-            log.Printf("Loaded %d channel keys from %s", len(fileKeys), keysPath)
+            rainbowCount = len(fileKeys)
+            log.Printf("Loaded %d channel keys from %s", rainbowCount, keysPath)
         } else {
             log.Printf("Warning: failed to parse channel keys file %s: %v", keysPath, err)
         }
     }
 
-    // Merge inline config keys (override file keys)
+    // 2. Derived keys from hashChannels (middle priority)
+    derivedCount := 0
+    for _, raw := range cfg.HashChannels {
+        trimmed := strings.TrimSpace(raw)
+        if trimmed == "" {
+            continue
+        }
+        channelName := trimmed
+        if !strings.HasPrefix(channelName, "#") {
+            channelName = "#" + channelName
+        }
+        // Skip if explicit config already has this key
+        if _, exists := cfg.ChannelKeys[channelName]; exists {
+            continue
+        }
+        keys[channelName] = deriveHashtagChannelKey(channelName)
+        derivedCount++
+    }
+    if derivedCount > 0 {
+        log.Printf("[channels] %d derived from hashChannels", derivedCount)
+    }
+
+    // 3. Explicit config keys (highest priority — overrides rainbow + derived)
     for k, v := range cfg.ChannelKeys {
         keys[k] = v
     }
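The derivation is easy to exercise standalone. A minimal sketch using only the standard library, reproducing the `#General` vector asserted in the tests below:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Same steps as deriveHashtagChannelKey: SHA-256 the name, keep the
	// first 16 bytes, hex-encode (32 hex chars = AES-128 key).
	h := sha256.Sum256([]byte("#General"))
	fmt.Println(hex.EncodeToString(h[:16])) // 649af2cab73ed5a890890a5485a0c004
}
```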
@@ -3,6 +3,8 @@ package main
 import (
+    "encoding/json"
     "math"
     "os"
+    "path/filepath"
     "testing"
     "time"
 )
@@ -492,3 +494,132 @@ func TestAdvertRole(t *testing.T) {
         })
     }
 }
+
+func TestDeriveHashtagChannelKey(t *testing.T) {
+    // Test vectors validated against Node.js server-helpers.js
+    tests := []struct {
+        name string
+        want string
+    }{
+        {"#General", "649af2cab73ed5a890890a5485a0c004"},
+        {"#test", "9cd8fcf22a47333b591d96a2b848b73f"},
+        {"#MeshCore", "dcf73f393fa217f6b28fcec6ffc411ad"},
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            got := deriveHashtagChannelKey(tt.name)
+            if got != tt.want {
+                t.Errorf("deriveHashtagChannelKey(%q) = %q, want %q", tt.name, got, tt.want)
+            }
+        })
+    }
+
+    // Deterministic
+    k1 := deriveHashtagChannelKey("#foo")
+    k2 := deriveHashtagChannelKey("#foo")
+    if k1 != k2 {
+        t.Error("deriveHashtagChannelKey should be deterministic")
+    }
+
+    // Returns 32-char hex string (16 bytes)
+    if len(k1) != 32 {
+        t.Errorf("key length = %d, want 32", len(k1))
+    }
+
+    // Different inputs → different keys
+    k3 := deriveHashtagChannelKey("#bar")
+    if k1 == k3 {
+        t.Error("different inputs should produce different keys")
+    }
+}
+
+func TestLoadChannelKeysMergePriority(t *testing.T) {
+    dir := t.TempDir()
+    cfgPath := filepath.Join(dir, "config.json")
+
+    // Create a rainbow file with two keys: #rainbow (unique) and #override (to be overridden)
+    rainbowPath := filepath.Join(dir, "channel-rainbow.json")
+    t.Setenv("CHANNEL_KEYS_PATH", rainbowPath)
+    rainbow := map[string]string{
+        "#rainbow":  "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+        "#override": "rainbow_value_should_be_overridden",
+    }
+    rainbowJSON, err := json.Marshal(rainbow)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err := os.WriteFile(rainbowPath, rainbowJSON, 0o644); err != nil {
+        t.Fatal(err)
+    }
+
+    cfg := &Config{
+        HashChannels: []string{"General", "#override"},
+        ChannelKeys:  map[string]string{"#override": "explicit_wins"},
+    }
+
+    keys := loadChannelKeys(cfg, cfgPath)
+
+    // Rainbow key loaded
+    if keys["#rainbow"] != "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" {
+        t.Errorf("rainbow key missing or wrong: %q", keys["#rainbow"])
+    }
+
+    // HashChannels derived #General
+    expected := deriveHashtagChannelKey("#General")
+    if keys["#General"] != expected {
+        t.Errorf("#General = %q, want %q (derived)", keys["#General"], expected)
+    }
+
+    // Explicit config wins over both rainbow and derived
+    if keys["#override"] != "explicit_wins" {
+        t.Errorf("#override = %q, want explicit_wins", keys["#override"])
+    }
+}
+
+func TestLoadChannelKeysHashChannelsNormalization(t *testing.T) {
+    t.Setenv("CHANNEL_KEYS_PATH", "")
+    dir := t.TempDir()
+    cfgPath := filepath.Join(dir, "config.json")
+
+    cfg := &Config{
+        HashChannels: []string{
+            "NoPound",   // should become #NoPound
+            "#HasPound", // stays #HasPound
+            " Spaced ",  // trimmed → #Spaced
+            "",          // skipped
+        },
+    }
+
+    keys := loadChannelKeys(cfg, cfgPath)
+
+    if _, ok := keys["#NoPound"]; !ok {
+        t.Error("should derive key for #NoPound (auto-prefixed)")
+    }
+    if _, ok := keys["#HasPound"]; !ok {
+        t.Error("should derive key for #HasPound")
+    }
+    if _, ok := keys["#Spaced"]; !ok {
+        t.Error("should derive key for #Spaced (trimmed)")
+    }
+    if len(keys) != 3 {
+        t.Errorf("expected 3 keys, got %d", len(keys))
+    }
+}
+
+func TestLoadChannelKeysSkipExplicit(t *testing.T) {
+    t.Setenv("CHANNEL_KEYS_PATH", "")
+    dir := t.TempDir()
+    cfgPath := filepath.Join(dir, "config.json")
+
+    cfg := &Config{
+        HashChannels: []string{"General"},
+        ChannelKeys:  map[string]string{"#General": "my_explicit_key"},
+    }
+
+    keys := loadChannelKeys(cfg, cfgPath)
+
+    // Explicit key should win — hashChannels derivation should be skipped
+    if keys["#General"] != "my_explicit_key" {
+        t.Errorf("#General = %q, want my_explicit_key", keys["#General"])
+    }
+}
+55 -48
@@ -62,7 +62,7 @@ type StoreObs struct {
 type PacketStore struct {
     mu      sync.RWMutex
     db      *DB
-    packets []*StoreTx          // sorted by first_seen DESC
+    packets []*StoreTx          // sorted by first_seen ASC (oldest first; newest at tail)
     byHash  map[string]*StoreTx // hash → *StoreTx
     byTxID  map[int]*StoreTx    // transmission_id → *StoreTx
     byObsID map[int]*StoreObs   // observation_id → *StoreObs
@@ -176,7 +176,7 @@ func (s *PacketStore) Load() error {
             FROM transmissions t
             LEFT JOIN observations o ON o.transmission_id = t.id
             LEFT JOIN observers obs ON obs.rowid = o.observer_idx
-            ORDER BY t.first_seen DESC, o.timestamp DESC`
+            ORDER BY t.first_seen ASC, o.timestamp DESC`
     } else {
         loadSQL = `SELECT t.id, t.raw_hex, t.hash, t.first_seen, t.route_type,
             t.payload_type, t.payload_version, t.decoded_json,
@@ -184,7 +184,7 @@ func (s *PacketStore) Load() error {
             o.snr, o.rssi, o.score, o.path_json, o.timestamp
             FROM transmissions t
             LEFT JOIN observations o ON o.transmission_id = t.id
-            ORDER BY t.first_seen DESC, o.timestamp DESC`
+            ORDER BY t.first_seen ASC, o.timestamp DESC`
     }
 
     rows, err := s.db.conn.Query(loadSQL)
@@ -368,28 +368,32 @@ func (s *PacketStore) QueryPackets(q PacketQuery) *PacketResult {
     results := s.filterPackets(q)
     total := len(results)
 
-    if q.Order == "ASC" {
-        sorted := make([]*StoreTx, len(results))
-        copy(sorted, results)
-        sort.Slice(sorted, func(i, j int) bool {
-            return sorted[i].FirstSeen < sorted[j].FirstSeen
-        })
-        results = sorted
-    }
-
-    // Paginate
+    // results is oldest-first (ASC). For DESC (default) read backwards from the tail;
+    // for ASC read forwards. Both are O(page_size) — no sort copy needed.
     start := q.Offset
-    if start >= len(results) {
+    if start >= total {
         return &PacketResult{Packets: []map[string]interface{}{}, Total: total}
     }
-    end := start + q.Limit
-    if end > len(results) {
-        end = len(results)
+    pageSize := q.Limit
+    if start+pageSize > total {
+        pageSize = total - start
     }
 
-    packets := make([]map[string]interface{}, 0, end-start)
-    for _, tx := range results[start:end] {
-        packets = append(packets, txToMap(tx))
+    packets := make([]map[string]interface{}, 0, pageSize)
+    if q.Order == "ASC" {
+        for _, tx := range results[start : start+pageSize] {
+            packets = append(packets, txToMap(tx))
+        }
+    } else {
+        // DESC: newest items are at the tail; page 0 = last pageSize items reversed
+        endIdx := total - start
+        startIdx := endIdx - pageSize
+        if startIdx < 0 {
+            startIdx = 0
+        }
+        for i := endIdx - 1; i >= startIdx; i-- {
+            packets = append(packets, txToMap(results[i]))
+        }
     }
     return &PacketResult{Packets: packets, Total: total}
 }
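The DESC branch is easier to follow with concrete numbers. A standalone sketch of the same index arithmetic (the names here are illustrative, not from the codebase):

```go
package main

import "fmt"

// descWindow mirrors the DESC branch above: for an oldest-first slice of
// length total, the page at (offset, limit) covers indices [startIdx, endIdx),
// emitted in reverse so the newest packet of the page comes first.
func descWindow(total, offset, limit int) (startIdx, endIdx int) {
	endIdx = total - offset
	startIdx = endIdx - limit
	if startIdx < 0 {
		startIdx = 0
	}
	return startIdx, endIdx
}

func main() {
	s, e := descWindow(100, 20, 20) // 100 packets, second page of 20
	fmt.Println(s, e)               // 60 80 → iterate i := 79 down to 60
}
```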
@@ -719,15 +723,16 @@ func (s *PacketStore) GetTimestamps(since string) []string {
     s.mu.RLock()
     defer s.mu.RUnlock()
 
-    // packets sorted newest first — scan from start until older than since
+    // packets sorted oldest-first — scan from tail until we reach items older than since
     var result []string
-    for _, tx := range s.packets {
+    for i := len(s.packets) - 1; i >= 0; i-- {
+        tx := s.packets[i]
         if tx.FirstSeen <= since {
             break
         }
         result = append(result, tx.FirstSeen)
     }
-    // Reverse to get ASC order
+    // result is currently newest-first; reverse to return ASC order
     for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
         result[i], result[j] = result[j], result[i]
     }
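The same scan-from-the-tail idiom in isolation (a sketch with plain strings standing in for timestamps):

```go
package main

import "fmt"

// newerThan walks an ASC-sorted slice backwards, collecting values newer
// than since (newest-first), then reverses them to ASC, matching the shape
// of GetTimestamps above.
func newerThan(sorted []string, since string) []string {
	var out []string
	for i := len(sorted) - 1; i >= 0; i-- {
		if sorted[i] <= since {
			break
		}
		out = append(out, sorted[i])
	}
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}

func main() {
	fmt.Println(newerThan([]string{"t1", "t2", "t3", "t4"}, "t2")) // [t3 t4]
}
```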
@@ -777,23 +782,30 @@ func (s *PacketStore) QueryMultiNodePackets(pubkeys []string, limit, offset int,
 
     total := len(filtered)
 
-    if order == "ASC" {
-        sort.Slice(filtered, func(i, j int) bool {
-            return filtered[i].FirstSeen < filtered[j].FirstSeen
-        })
-    }
-
+    // filtered is oldest-first (built by iterating s.packets forward).
+    // Apply same DESC/ASC pagination logic as QueryPackets.
     if offset >= total {
         return &PacketResult{Packets: []map[string]interface{}{}, Total: total}
     }
-    end := offset + limit
-    if end > total {
-        end = total
+    pageSize := limit
+    if offset+pageSize > total {
+        pageSize = total - offset
     }
 
-    packets := make([]map[string]interface{}, 0, end-offset)
-    for _, tx := range filtered[offset:end] {
-        packets = append(packets, txToMap(tx))
+    packets := make([]map[string]interface{}, 0, pageSize)
+    if order == "ASC" {
+        for _, tx := range filtered[offset : offset+pageSize] {
+            packets = append(packets, txToMap(tx))
+        }
+    } else {
+        endIdx := total - offset
+        startIdx := endIdx - pageSize
+        if startIdx < 0 {
+            startIdx = 0
+        }
+        for i := endIdx - 1; i >= startIdx; i-- {
+            packets = append(packets, txToMap(filtered[i]))
+        }
     }
     return &PacketResult{Packets: packets, Total: total}
 }
@@ -926,15 +938,14 @@ func (s *PacketStore) IngestNewFromDB(sinceID, limit int) ([]map[string]interfac
             DecodedJSON: r.decodedJSON,
         }
         s.byHash[r.hash] = tx
-        // Prepend (newest first)
-        s.packets = append([]*StoreTx{tx}, s.packets...)
+        s.packets = append(s.packets, tx) // oldest-first; new items go to tail
        s.byTxID[r.txID] = tx
         s.indexByNode(tx)
         if tx.PayloadType != nil {
             pt := *tx.PayloadType
-            // Prepend to maintain newest-first order (matches Load ordering)
+            // Append to maintain oldest-first order (matches Load ordering)
             // so GetChannelMessages reverse iteration stays correct
-            s.byPayloadType[pt] = append([]*StoreTx{tx}, s.byPayloadType[pt]...)
+            s.byPayloadType[pt] = append(s.byPayloadType[pt], tx)
         }
 
         if _, exists := broadcastTxs[r.txID]; !exists {
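The prepend-to-append switch is the payoff of the ASC flip: `append([]*StoreTx{tx}, s.packets...)` allocates a fresh slice and copies every existing element on each new packet (O(n) per insert), whereas `append(s.packets, tx)` is amortized O(1).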
@@ -1079,8 +1090,6 @@ func (s *PacketStore) IngestNewFromDB(sinceID, limit int) ([]map[string]interfac
         s.cacheMu.Unlock()
     }
 
-    log.Printf("[poller] IngestNewFromDB: found %d new txs, maxID %d->%d", len(result), sinceID, newMaxID)
-
     return result, newMaxID
 }
 
@@ -1263,8 +1272,7 @@ func (s *PacketStore) IngestNewObservations(sinceObsID, limit int) int {
         s.subpathCache = make(map[string]*cachedResult)
         s.cacheMu.Unlock()
 
-        log.Printf("[poller] IngestNewObservations: updated %d existing txs, maxObsID %d->%d",
-            len(updatedTxs), sinceObsID, newMaxObsID)
+        // analytics caches cleared; no per-cycle log to avoid stdout overhead
     }
 
     return newMaxObsID
@@ -1888,7 +1896,7 @@ func (s *PacketStore) GetChannelMessages(channelHash string, limit, offset int)
     msgMap := map[string]*msgEntry{}
     var msgOrder []string
 
-    // Iterate type-5 packets oldest-first (byPayloadType is in load order = newest first)
+    // Iterate type-5 packets oldest-first (byPayloadType is ASC = oldest first)
     type decodedMsg struct {
         Type    string `json:"type"`
         Channel string `json:"channel"`
@@ -1899,8 +1907,7 @@
     }
 
     grpTxts := s.byPayloadType[5]
-    for i := len(grpTxts) - 1; i >= 0; i-- {
-        tx := grpTxts[i]
+    for _, tx := range grpTxts {
         if tx.DecodedJSON == "" {
             continue
         }
@@ -4069,13 +4076,13 @@ func (s *PacketStore) GetNodeHealth(pubkey string) (map[string]interface{}, erro
         lhVal = lastHeard
     }
 
-    // Recent packets (up to 20, newest first — packets are already sorted DESC)
+    // Recent packets (up to 20, newest first — read from tail of oldest-first slice)
     recentLimit := 20
     if len(packets) < recentLimit {
         recentLimit = len(packets)
     }
     recentPackets := make([]map[string]interface{}, 0, recentLimit)
-    for i := 0; i < recentLimit; i++ {
+    for i := len(packets) - 1; i >= len(packets)-recentLimit; i-- {
         p := txToMap(packets[i])
         delete(p, "observations")
         recentPackets = append(recentPackets, p)
+3 -1
@@ -1,5 +1,7 @@
-# Volume paths unified with manage.sh — see manage.sh lines 9-12, 56-68, 98-113
+# Volume paths unified with manage.sh — see manage.sh lines 9-12, 55-62, 920, 944
 # Override defaults via .env or environment variables.
+# CRITICAL: All data mounts use bind mounts (~/path), NOT named volumes.
+# This ensures the DB and theme are visible on the host filesystem for backup.
 
 services:
   prod:
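In compose terms this means each service's data mount is a host path such as `~/meshcore-data:/app/data` rather than a named volume like `meshcore-data:/app/data`; the concrete `volumes:` entries sit outside this hunk.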
@@ -52,19 +52,12 @@ is_done() { [ -f "$STATE_FILE" ] && grep -qx "$1" "$STATE_FILE" 2>/dev/null;
 # ─── Helpers ──────────────────────────────────────────────────────────────
 
 # Determine the correct data volume/mount args for docker run.
-# Detects existing host data directories and uses bind mounts if found.
+# Always uses bind mounts so the DB is visible on the filesystem.
 get_data_mount_args() {
-  # Check for existing host data directories with a DB file
-  if [ -d "$HOME/meshcore-data" ] && [ -f "$HOME/meshcore-data/meshcore.db" ]; then
-    echo "-v $HOME/meshcore-data:/app/data"
-    return
-  fi
-  if [ -d "$(pwd)/data" ] && [ -f "$(pwd)/data/meshcore.db" ]; then
-    echo "-v $(pwd)/data:/app/data"
-    return
-  fi
-  # Default: Docker named volume
-  echo "-v ${DATA_VOLUME}:/app/data"
+  # Always use bind mount (from .env or default)
+  # Create the directory if it doesn't exist
+  mkdir -p "$PROD_DATA"
+  echo "-v $PROD_DATA:/app/data"
 }
 
 # Determine the required port mappings from Caddyfile
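Assuming `$PROD_DATA` is resolved from `PROD_DATA_DIR` in `.env` (default `~/meshcore-data`), the function now always emits `-v $HOME/meshcore-data:/app/data`, creating the directory on first use; the named-volume fallback `-v ${DATA_VOLUME}:/app/data` is gone entirely.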
@@ -385,8 +378,8 @@ cmd_setup() {
   step 5 "Starting container"
 
   # Detect existing data directories
-  if [ -d "$HOME/meshcore-data" ] && [ -f "$HOME/meshcore-data/meshcore.db" ]; then
-    info "Found existing data at \$HOME/meshcore-data/ — will use bind mount."
+  if [ -d "$PROD_DATA" ] && [ -f "$PROD_DATA/meshcore.db" ]; then
+    info "Found existing data at $PROD_DATA/ — will use bind mount."
   elif [ -d "$(pwd)/data" ] && [ -f "$(pwd)/data/meshcore.db" ]; then
     info "Found existing data at ./data/ — will use bind mount."
   fi
@@ -924,7 +917,8 @@ cmd_backup() {
   info "Backing up to ${BACKUP_DIR}/"
 
   # Database
-  DB_PATH=$(docker volume inspect "$DATA_VOLUME" --format '{{ .Mountpoint }}' 2>/dev/null)/meshcore.db
+  # Always use bind mount path (from .env or default)
+  DB_PATH="$PROD_DATA/meshcore.db"
   if [ -f "$DB_PATH" ]; then
     cp "$DB_PATH" "$BACKUP_DIR/meshcore.db"
     log "Database ($(du -h "$BACKUP_DIR/meshcore.db" | cut -f1))"
@@ -948,7 +942,8 @@ cmd_backup() {
   fi
 
   # Theme
-  THEME_PATH=$(docker volume inspect "$DATA_VOLUME" --format '{{ .Mountpoint }}' 2>/dev/null)/theme.json
+  # Always use bind mount path (from .env or default)
+  THEME_PATH="$PROD_DATA/theme.json"
   if [ -f "$THEME_PATH" ]; then
     cp "$THEME_PATH" "$BACKUP_DIR/theme.json"
     log "theme.json"
@@ -1024,12 +1019,10 @@ cmd_restore() {
   docker stop "$CONTAINER_NAME" 2>/dev/null || true
 
   # Restore database
-  DEST_DB=$(docker volume inspect "$DATA_VOLUME" --format '{{ .Mountpoint }}' 2>/dev/null)/meshcore.db
-  if [ -d "$(dirname "$DEST_DB")" ]; then
-    cp "$DB_FILE" "$DEST_DB"
-  else
-    docker cp "$DB_FILE" "${CONTAINER_NAME}:/app/data/meshcore.db"
-  fi
+  # Always use bind mount path (from .env or default)
+  mkdir -p "$PROD_DATA"
+  DEST_DB="$PROD_DATA/meshcore.db"
+  cp "$DB_FILE" "$DEST_DB"
   log "Database restored"
 
   # Restore config if present
@@ -1047,10 +1040,8 @@ cmd_restore() {
 
   # Restore theme if present
   if [ -n "$THEME_FILE" ] && [ -f "$THEME_FILE" ]; then
-    DEST_THEME=$(docker volume inspect "$DATA_VOLUME" --format '{{ .Mountpoint }}' 2>/dev/null)/theme.json
-    if [ -d "$(dirname "$DEST_THEME")" ]; then
-      cp "$THEME_FILE" "$DEST_THEME"
-    fi
+    DEST_THEME="$PROD_DATA/theme.json"
+    cp "$THEME_FILE" "$DEST_THEME"
     log "theme.json restored"
   fi
@@ -1098,7 +1089,7 @@ cmd_reset() {
   rm -f "$STATE_FILE"
 
   log "Reset complete. Run './manage.sh setup' to start over."
-  echo "  Data volume preserved. To delete it: docker volume rm ${DATA_VOLUME}"
+  echo "  Data directory: $PROD_DATA (not removed)"
 }
 
 # ─── Help ─────────────────────────────────────────────────────────────────