mirror of
https://github.com/Kpa-clawbot/meshcore-analyzer.git
synced 2026-04-23 00:15:51 +00:00
## Summary

Adds two config knobs for controlling backfill scope and neighbor graph data retention, plus removes the dead synchronous backfill function.

## Changes

### Config knobs

#### `resolvedPath.backfillHours` (default: 24)
Controls how far back (in hours) the async backfill scans for observations with NULL `resolved_path`. Transmissions with `first_seen` older than this window are skipped, reducing startup time for instances with large historical datasets.

#### `neighborGraph.maxAgeDays` (default: 30)
Controls the maximum age of `neighbor_edges` entries. Edges with `last_seen` older than this are pruned from both SQLite and the in-memory graph. Pruning runs on startup (after a 4-minute stagger) and every 24 hours thereafter.

### Dead code removal
- Removed the synchronous `backfillResolvedPaths` function that was replaced by the async version.

### Implementation details
- `backfillResolvedPathsAsync` now accepts a `backfillHours` parameter and filters by `tx.FirstSeen`
- `NeighborGraph.PruneOlderThan(cutoff)` removes stale edges from the in-memory graph
- `PruneNeighborEdges(conn, graph, maxAgeDays)` prunes both DB and in-memory graph
- Periodic pruning ticker follows the same pattern as metrics pruning (24h interval, staggered start)
- Graceful shutdown stops the edge prune ticker

### Config example
Both knobs added to `config.example.json` with `_comment` fields.

## Tests
- Config default/override tests for both knobs
- `TestGraphPruneOlderThan` — in-memory edge pruning
- `TestPruneNeighborEdgesDB` — SQLite + in-memory pruning together
- `TestBackfillRespectsHourWindow` — verifies old transmissions are excluded by backfill window

---------

Co-authored-by: you <you@example.com>
133 lines
3.6 KiB
Go
133 lines
3.6 KiB
Go
package main
|
|
|
|
import (
|
|
"encoding/json"
|
|
"net/http"
|
|
"net/http/httptest"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/gorilla/mux"
|
|
)
|
|
|
|
// TestBackfillAsyncChunked verifies that backfillResolvedPathsAsync processes
|
|
// observations in chunks, yields between batches, and sets the completion flag.
|
|
func TestBackfillAsyncChunked(t *testing.T) {
|
|
store := &PacketStore{
|
|
packets: make([]*StoreTx, 0),
|
|
byHash: make(map[string]*StoreTx),
|
|
byTxID: make(map[int]*StoreTx),
|
|
byObsID: make(map[int]*StoreObs),
|
|
}
|
|
|
|
// No pending observations → should complete immediately.
|
|
backfillResolvedPathsAsync(store, "", 100, time.Millisecond, 24)
|
|
if !store.backfillComplete.Load() {
|
|
t.Fatal("expected backfillComplete to be true with empty store")
|
|
}
|
|
}
|
|
|
|
// TestBackfillStatusHeader verifies the X-CoreScope-Status header is set correctly.
|
|
func TestBackfillStatusHeader(t *testing.T) {
|
|
store := &PacketStore{
|
|
packets: make([]*StoreTx, 0),
|
|
byHash: make(map[string]*StoreTx),
|
|
byTxID: make(map[int]*StoreTx),
|
|
byObsID: make(map[int]*StoreObs),
|
|
}
|
|
|
|
srv := &Server{store: store}
|
|
|
|
handler := srv.backfillStatusMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
w.WriteHeader(200)
|
|
}))
|
|
|
|
// Before backfill completes → backfilling
|
|
req := httptest.NewRequest("GET", "/api/stats", nil)
|
|
rec := httptest.NewRecorder()
|
|
handler.ServeHTTP(rec, req)
|
|
if got := rec.Header().Get("X-CoreScope-Status"); got != "backfilling" {
|
|
t.Fatalf("expected 'backfilling', got %q", got)
|
|
}
|
|
|
|
// After backfill completes → ready
|
|
store.backfillComplete.Store(true)
|
|
rec = httptest.NewRecorder()
|
|
handler.ServeHTTP(rec, req)
|
|
if got := rec.Header().Get("X-CoreScope-Status"); got != "ready" {
|
|
t.Fatalf("expected 'ready', got %q", got)
|
|
}
|
|
}
|
|
|
|
// TestStatsBackfillFields verifies /api/stats includes backfill fields.
|
|
func TestStatsBackfillFields(t *testing.T) {
|
|
db := setupTestDBv2(t)
|
|
defer db.Close()
|
|
seedV2Data(t, db)
|
|
|
|
store := &PacketStore{
|
|
db: db,
|
|
packets: make([]*StoreTx, 0),
|
|
byHash: make(map[string]*StoreTx),
|
|
byTxID: make(map[int]*StoreTx),
|
|
byObsID: make(map[int]*StoreObs),
|
|
loaded: true,
|
|
}
|
|
|
|
cfg := &Config{Port: 0}
|
|
hub := NewHub()
|
|
srv := NewServer(db, cfg, hub)
|
|
srv.store = store
|
|
|
|
router := mux.NewRouter()
|
|
srv.RegisterRoutes(router)
|
|
|
|
// While backfilling
|
|
req := httptest.NewRequest("GET", "/api/stats", nil)
|
|
rec := httptest.NewRecorder()
|
|
router.ServeHTTP(rec, req)
|
|
|
|
var resp map[string]interface{}
|
|
if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil {
|
|
t.Fatalf("failed to parse stats response: %v", err)
|
|
}
|
|
|
|
if backfilling, ok := resp["backfilling"]; !ok {
|
|
t.Fatal("missing 'backfilling' field in stats response")
|
|
} else if backfilling != true {
|
|
t.Fatalf("expected backfilling=true, got %v", backfilling)
|
|
}
|
|
|
|
if _, ok := resp["backfillProgress"]; !ok {
|
|
t.Fatal("missing 'backfillProgress' field in stats response")
|
|
}
|
|
|
|
// Check header
|
|
if got := rec.Header().Get("X-CoreScope-Status"); got != "backfilling" {
|
|
t.Fatalf("expected X-CoreScope-Status=backfilling, got %q", got)
|
|
}
|
|
|
|
// After backfill completes
|
|
store.backfillComplete.Store(true)
|
|
// Invalidate stats cache
|
|
srv.statsMu.Lock()
|
|
srv.statsCache = nil
|
|
srv.statsMu.Unlock()
|
|
|
|
rec = httptest.NewRecorder()
|
|
router.ServeHTTP(rec, req)
|
|
|
|
resp = nil
|
|
if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil {
|
|
t.Fatalf("failed to parse stats response: %v", err)
|
|
}
|
|
|
|
if backfilling, ok := resp["backfilling"]; !ok || backfilling != false {
|
|
t.Fatalf("expected backfilling=false after completion, got %v", backfilling)
|
|
}
|
|
|
|
if got := rec.Header().Get("X-CoreScope-Status"); got != "ready" {
|
|
t.Fatalf("expected X-CoreScope-Status=ready, got %q", got)
|
|
}
|
|
}
|