feat(#690): expose observer skew + per-hash evidence in clock UI (#906)

## Summary

UI completion of #690 — surfaces observer clock skew and per-hash
evidence that the backend already computes but wasn't exposed in the
frontend.

**Not related to #845/PR #894** (bimodal detection) — this is the UI
surface for the original #690 scope.

## Changes

### Backend: per-hash evidence in node clock-skew API (commit 1)
- Extended `GET /api/nodes/{pubkey}/clock-skew` to return
`recentHashEvidence` (most recent 10 hashes with per-observer
raw/corrected skew and observer offset) and `calibrationSummary`
(total/calibrated/uncalibrated counts).
- Evidence is cached during `ClockSkewEngine.Recompute()` — route
handler is cheap.
- Fleet endpoint omits evidence to keep payload small.

### Frontend: observer list page — clock offset column (commit 2)
- Added "Clock Offset" column to observers table.
- Fetches `/api/observers/clock-skew` once on page load, joins by
ObserverID.
- Color-coded severity badge + sample count tooltip.
- Singleton observers show "—" not "0".

### Frontend: observer-detail clock card (commit 3)
- Added clock offset card mirroring node clock card style.
- Shows: offset value, sample count, severity badge.
- Inline explainer describing how offset is computed from multi-observer
packets.

### Frontend: node clock card evidence panel (commit 4)
- Collapsible "Evidence" section in existing node clock skew card.
- Per-hash breakdown: observer count, median corrected skew,
per-observer raw/corrected/offset.
- Calibration summary line and plain-English severity reason at top.

## Test Results

```
go test ./... (cmd/server) — PASS (19.3s)
go test ./... (cmd/ingestor) — PASS (31.6s)
Frontend helpers: 610 passed, 0 failed
```

New test: `TestNodeClockSkew_EvidencePayload` — 3-observer scenario
verifying per-hash array shape, corrected = raw + offset math, and
median.

No frontend JS smoke test added — no existing test harness for
clock/observer rendering. Noted for future.

## Screenshots

Screenshots TBD

## Perf justification

Evidence is computed inside the existing `Recompute()` cycle (already
O(n) on samples). The `hashEvidence` map adds roughly 32 bytes of memory
per sample. Evidence is stripped from fleet responses, and the per-node
endpoint returns at most 10 evidence entries, so the payload is bounded.

---------

Co-authored-by: you <you@example.com>
This commit is contained in:
Kpa-clawbot
2026-05-02 10:30:54 -07:00
committed by GitHub
parent c67f3347ce
commit b47587f031
6 changed files with 307 additions and 12 deletions
+123 -4
View File
@@ -120,6 +120,8 @@ type NodeClockSkew struct {
GoodFraction float64 `json:"goodFraction"` // fraction of recent samples with |skew| <= 1h
RecentBadSampleCount int `json:"recentBadSampleCount"` // count of recent samples with |skew| > 1h
RecentSampleCount int `json:"recentSampleCount"` // total recent samples in window
RecentHashEvidence []HashEvidence `json:"recentHashEvidence,omitempty"`
CalibrationSummary *CalibrationSummary `json:"calibrationSummary,omitempty"`
NodeName string `json:"nodeName,omitempty"` // populated in fleet responses
NodeRole string `json:"nodeRole,omitempty"` // populated in fleet responses
}
@@ -130,6 +132,31 @@ type SkewSample struct {
SkewSec float64 `json:"skew"` // corrected skew in seconds
}
// HashEvidenceObserver is one observer's contribution to a per-hash evidence entry.
// The correction invariant is CorrectedSkewSec = RawSkewSec + ObserverOffsetSec.
type HashEvidenceObserver struct {
	ObserverID        string  `json:"observerID"`        // stable observer identifier
	ObserverName      string  `json:"observerName"`      // human-readable name; falls back to ObserverID when unknown
	RawSkewSec        float64 `json:"rawSkewSec"`        // skew before observer-offset correction (seconds)
	CorrectedSkewSec  float64 `json:"correctedSkewSec"`  // RawSkewSec + ObserverOffsetSec (seconds)
	ObserverOffsetSec float64 `json:"observerOffsetSec"` // this observer's calibrated clock offset (seconds)
	Calibrated        bool    `json:"calibrated"`        // true when a calibration offset existed for this observer
}
// HashEvidence is per-hash clock skew evidence showing individual observer contributions.
// Each hash corresponds to one advert transmission from one node.
type HashEvidence struct {
	Hash                   string                 `json:"hash"`                   // advert transaction hash
	Observers              []HashEvidenceObserver `json:"observers"`              // one entry per observer that saw this hash
	MedianCorrectedSkewSec float64                `json:"medianCorrectedSkewSec"` // median of Observers' corrected skews (seconds)
	Timestamp              int64                  `json:"timestamp"`              // observation timestamp; used to sort evidence by recency
}
// CalibrationSummary counts how many samples were corrected via observer calibration.
// Counted over the samples included in the evidence payload;
// TotalSamples = CalibratedSamples + UncalibratedSamples.
type CalibrationSummary struct {
	TotalSamples        int `json:"totalSamples"`        // all samples counted while building evidence
	CalibratedSamples   int `json:"calibratedSamples"`   // samples that had an observer offset applied
	UncalibratedSamples int `json:"uncalibratedSamples"` // samples with no offset available (single-observer)
}
// txSkewResult maps tx hash → per-transmission skew stats. This is an
// intermediate result keyed by hash (not pubkey); the store maps hash → pubkey
// when building the final per-node view.
@@ -143,15 +170,27 @@ type ClockSkewEngine struct {
observerOffsets map[string]float64 // observerID → calibrated offset (seconds)
observerSamples map[string]int // observerID → number of multi-observer packets used
nodeSkew txSkewResult
hashEvidence map[string][]hashEvidenceEntry // hash → per-observer raw/corrected data
lastComputed time.Time
computeInterval time.Duration
}
// hashEvidenceEntry stores raw evidence per observer per hash, cached during Recompute.
// Skew/offset values are pre-rounded to 0.1s when computed so route handlers can
// copy them straight into the API payload without further work.
type hashEvidenceEntry struct {
	observerID string  // observer that contributed this sample
	rawSkew    float64 // uncorrected skew, seconds
	corrected  float64 // rawSkew + offset, seconds
	offset     float64 // observer's calibrated clock offset, seconds
	calibrated bool    // whether a calibration offset existed for this observer
	observedTS int64   // observation timestamp; used for recency ordering of evidence
}
// NewClockSkewEngine returns a ClockSkewEngine with empty calibration and
// evidence state and a 30-second recompute interval.
func NewClockSkewEngine() *ClockSkewEngine {
	return &ClockSkewEngine{
		observerOffsets: make(map[string]float64),
		observerSamples: make(map[string]int),
		nodeSkew:        make(txSkewResult),
		hashEvidence:    make(map[string][]hashEvidenceEntry),
		computeInterval: 30 * time.Second,
	}
}
@@ -176,14 +215,16 @@ func (e *ClockSkewEngine) Recompute(store *PacketStore) {
var newOffsets map[string]float64
var newSamples map[string]int
var newNodeSkew txSkewResult
var newHashEvidence map[string][]hashEvidenceEntry
if len(samples) > 0 {
newOffsets, newSamples = calibrateObservers(samples)
newNodeSkew = computeNodeSkew(samples, newOffsets)
newNodeSkew, newHashEvidence = computeNodeSkew(samples, newOffsets)
} else {
newOffsets = make(map[string]float64)
newSamples = make(map[string]int)
newNodeSkew = make(txSkewResult)
newHashEvidence = make(map[string][]hashEvidenceEntry)
}
// Swap results under brief write lock.
@@ -196,6 +237,7 @@ func (e *ClockSkewEngine) Recompute(store *PacketStore) {
e.observerOffsets = newOffsets
e.observerSamples = newSamples
e.nodeSkew = newNodeSkew
e.hashEvidence = newHashEvidence
e.lastComputed = time.Now()
e.mu.Unlock()
}
@@ -332,7 +374,7 @@ func calibrateObservers(samples []skewSample) (map[string]float64, map[string]in
// ── Phase 3: Per-Node Skew ─────────────────────────────────────────────────────
// computeNodeSkew calculates corrected skew statistics for each node.
func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) txSkewResult {
func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) (txSkewResult, map[string][]hashEvidenceEntry) {
// Compute corrected skew per sample, grouped by hash (each hash = one
// node's advert transmission). The caller maps hash → pubkey via byNode.
type correctedSample struct {
@@ -343,6 +385,7 @@ func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) txSkew
byHash := make(map[string][]correctedSample)
hashAdvertTS := make(map[string]int64)
evidence := make(map[string][]hashEvidenceEntry) // hash → per-observer evidence
for _, s := range samples {
obsOffset, hasCal := obsOffsets[s.observerID]
@@ -359,6 +402,14 @@ func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) txSkew
calibrated: hasCal,
})
hashAdvertTS[s.hash] = s.advertTS
evidence[s.hash] = append(evidence[s.hash], hashEvidenceEntry{
observerID: s.observerID,
rawSkew: round(rawSkew, 1),
corrected: round(corrected, 1),
offset: round(obsOffset, 1),
calibrated: hasCal,
observedTS: s.observedTS,
})
}
// Each hash represents one advert from one node. Compute median corrected
@@ -397,7 +448,7 @@ func computeNodeSkew(samples []skewSample, obsOffsets map[string]float64) txSkew
LastObservedTS: latestObsTS,
}
}
return result
return result, evidence
}
// ── Integration with PacketStore ───────────────────────────────────────────────
@@ -558,6 +609,70 @@ func (s *PacketStore) getNodeClockSkewLocked(pubkey string) *NodeClockSkew {
samples[i] = SkewSample{Timestamp: p.ts, SkewSec: round(p.skew, 1)}
}
// Build per-hash evidence (most recent 10 hashes with ≥1 observer).
// Observer name lookup from store observations.
obsNameMap := make(map[string]string)
type hashMeta struct {
hash string
ts int64
}
var evidenceHashes []hashMeta
for _, tx := range txs {
if tx.PayloadType == nil || *tx.PayloadType != PayloadADVERT {
continue
}
ev, ok := s.clockSkew.hashEvidence[tx.Hash]
if !ok || len(ev) == 0 {
continue
}
// Collect observer names from tx observations.
for _, obs := range tx.Observations {
if obs.ObserverID != "" && obs.ObserverName != "" {
obsNameMap[obs.ObserverID] = obs.ObserverName
}
}
evidenceHashes = append(evidenceHashes, hashMeta{hash: tx.Hash, ts: ev[0].observedTS})
}
// Sort by timestamp descending, take most recent 10.
sort.Slice(evidenceHashes, func(i, j int) bool { return evidenceHashes[i].ts > evidenceHashes[j].ts })
if len(evidenceHashes) > 10 {
evidenceHashes = evidenceHashes[:10]
}
var recentEvidence []HashEvidence
var calSummary CalibrationSummary
for _, eh := range evidenceHashes {
entries := s.clockSkew.hashEvidence[eh.hash]
var observers []HashEvidenceObserver
var corrSkews []float64
for _, e := range entries {
name := obsNameMap[e.observerID]
if name == "" {
name = e.observerID
}
observers = append(observers, HashEvidenceObserver{
ObserverID: e.observerID,
ObserverName: name,
RawSkewSec: e.rawSkew,
CorrectedSkewSec: e.corrected,
ObserverOffsetSec: e.offset,
Calibrated: e.calibrated,
})
corrSkews = append(corrSkews, e.corrected)
calSummary.TotalSamples++
if e.calibrated {
calSummary.CalibratedSamples++
} else {
calSummary.UncalibratedSamples++
}
}
recentEvidence = append(recentEvidence, HashEvidence{
Hash: eh.hash,
Observers: observers,
MedianCorrectedSkewSec: round(median(corrSkews), 1),
Timestamp: eh.ts,
})
}
return &NodeClockSkew{
Pubkey: pubkey,
MeanSkewSec: round(meanSkew, 1),
@@ -574,6 +689,8 @@ func (s *PacketStore) getNodeClockSkewLocked(pubkey string) *NodeClockSkew {
GoodFraction: round(goodFraction, 2),
RecentBadSampleCount: recentBadCount,
RecentSampleCount: recentSampleCount,
RecentHashEvidence: recentEvidence,
CalibrationSummary: &calSummary,
}
}
@@ -601,8 +718,10 @@ func (s *PacketStore) GetFleetClockSkew() []*NodeClockSkew {
cs.NodeName = ni.Name
cs.NodeRole = ni.Role
}
// Omit samples in fleet response (too much data).
// Omit samples and evidence in fleet response (too much data).
cs.Samples = nil
cs.RecentHashEvidence = nil
cs.CalibrationSummary = nil
results = append(results, cs)
}
return results
+103 -2
View File
@@ -191,7 +191,7 @@ func TestComputeNodeSkew_BasicCorrection(t *testing.T) {
// So the median approach finds obs2 is +5 ahead (relative to median)
// Now compute node skew with those offsets:
nodeSkew := computeNodeSkew(samples, offsets)
nodeSkew, _ := computeNodeSkew(samples, offsets)
cs, ok := nodeSkew["h1"]
if !ok {
t.Fatal("expected skew data for hash h1")
@@ -220,7 +220,7 @@ func TestComputeNodeSkew_ThreeObservers(t *testing.T) {
t.Errorf("obs3 offset = %v, want 30", offsets["obs3"])
}
nodeSkew := computeNodeSkew(samples, offsets)
nodeSkew, _ := computeNodeSkew(samples, offsets)
cs := nodeSkew["h1"]
if cs == nil {
t.Fatal("expected skew data for h1")
@@ -954,3 +954,104 @@ func TestAllGood_OK_845(t *testing.T) {
t.Errorf("recentBadSampleCount = %v, want 0", r.RecentBadSampleCount)
}
}
// TestNodeClockSkew_EvidencePayload verifies the per-hash evidence payload in
// the node clock-skew response: entry/observer array shape, the
// corrected = raw + offset invariant, the median corrected skew, calibration
// counts, and observer-name resolution from tx observations.
func TestNodeClockSkew_EvidencePayload(t *testing.T) {
	// 3-observer scenario: obs1 ahead by +2s, obs2 on time, obs3 behind by -1s.
	// Node clock is 60s ahead. Raw skew = advertTS - obsTS.
	//
	// Advert timestamp: 1700000060 (node 60s ahead of true time 1700000000)
	//   obs1 sees at 1700000002 (2s ahead of true time) → raw = 60 - 2 = 58
	//   obs2 sees at 1700000000 (on time)               → raw = 60 - 0 = 60
	//   obs3 sees at 1699999999 (1s behind)             → raw = 60 + 1 = 61
	// Median obsTS = 1700000000, so offsets are obs1 +2, obs2 0, obs3 -1.
	// Corrected = raw + offset → 58+2=60, 60+0=60, 61+(-1)=60 for all three.
	store := NewPacketStore(nil, nil)
	advertType := 4 // ADVERT
	first := &StoreTx{
		Hash:        "evidence_hash_1",
		PayloadType: &advertType,
		DecodedJSON: `{"payload":{"timestamp":1700000060}}`,
		Observations: []*StoreObs{
			{ObserverID: "obs1", ObserverName: "Observer Alpha", Timestamp: "2023-11-14T22:13:22Z"},
			{ObserverID: "obs2", ObserverName: "Observer Beta", Timestamp: "2023-11-14T22:13:20Z"},
			{ObserverID: "obs3", ObserverName: "Observer Gamma", Timestamp: "2023-11-14T22:13:19Z"},
		},
	}
	// A second hash one hour later so the payload carries multiple entries.
	second := &StoreTx{
		Hash:        "evidence_hash_2",
		PayloadType: &advertType,
		DecodedJSON: `{"payload":{"timestamp":1700003660}}`,
		Observations: []*StoreObs{
			{ObserverID: "obs1", ObserverName: "Observer Alpha", Timestamp: "2023-11-14T23:13:22Z"},
			{ObserverID: "obs2", ObserverName: "Observer Beta", Timestamp: "2023-11-14T23:13:20Z"},
			{ObserverID: "obs3", ObserverName: "Observer Gamma", Timestamp: "2023-11-14T23:13:19Z"},
		},
	}

	store.mu.Lock()
	store.byNode["NODETEST"] = []*StoreTx{first, second}
	store.byPayloadType[4] = []*StoreTx{first, second}
	store.clockSkew.computeInterval = 0
	store.mu.Unlock()

	res := store.GetNodeClockSkew("NODETEST")
	if res == nil {
		t.Fatal("expected clock skew result")
	}

	// Evidence must be present and cover both hashes.
	if len(res.RecentHashEvidence) == 0 {
		t.Fatal("expected recentHashEvidence to be populated")
	}
	if len(res.RecentHashEvidence) != 2 {
		t.Errorf("recentHashEvidence length = %d, want 2", len(res.RecentHashEvidence))
	}

	entry := res.RecentHashEvidence[0]
	if len(entry.Observers) != 3 {
		t.Fatalf("evidence observers = %d, want 3", len(entry.Observers))
	}
	// Each observer's corrected skew must equal raw + observer offset.
	for _, obs := range entry.Observers {
		expected := obs.RawSkewSec + obs.ObserverOffsetSec
		if math.Abs(obs.CorrectedSkewSec-expected) > 0.2 {
			t.Errorf("observer %s: corrected=%.1f, expected raw(%.1f)+offset(%.1f)=%.1f",
				obs.ObserverID, obs.CorrectedSkewSec, obs.RawSkewSec, obs.ObserverOffsetSec, expected)
		}
	}
	// Every corrected value lands near 60s (node is 60s ahead), so the median does too.
	if math.Abs(entry.MedianCorrectedSkewSec-60) > 1 {
		t.Errorf("median corrected = %.1f, want ~60", entry.MedianCorrectedSkewSec)
	}

	// Calibration summary must count every sample as calibrated.
	if res.CalibrationSummary == nil {
		t.Fatal("expected calibrationSummary")
	}
	if res.CalibrationSummary.TotalSamples != 6 { // 3 observers × 2 hashes
		t.Errorf("calibration total = %d, want 6", res.CalibrationSummary.TotalSamples)
	}
	if res.CalibrationSummary.CalibratedSamples != 6 {
		t.Errorf("calibrated = %d, want 6 (all multi-observer)", res.CalibrationSummary.CalibratedSamples)
	}

	// Names must resolve from the tx observations rather than fall back to IDs.
	nameFound := false
	for _, obs := range entry.Observers {
		if obs.ObserverName == "Observer Alpha" || obs.ObserverName == "Observer Beta" {
			nameFound = true
		}
	}
	if !nameFound {
		t.Error("expected observer names to be populated from tx observations")
	}
}
+37 -1
View File
@@ -815,6 +815,41 @@
* Shared between the full-screen detail page and the side panel (#813, #690).
* No-op if the container is missing, the API errors, or the response lacks severity.
*/
/**
 * Build the collapsible "Evidence" panel appended to the node clock skew card.
 * Renders a plain-English severity reason line, a calibration summary, and a
 * per-hash breakdown of each observer's raw/corrected skew and offset.
 * Returns '' when the response carries no evidence, so callers can append the
 * result unconditionally.
 */
function buildEvidencePanel(cs) {
  var evidence = cs.recentHashEvidence;
  if (!evidence || evidence.length === 0) return '';
  // Observer names, hashes, and severity strings originate from network data —
  // escape them before interpolating into innerHTML to prevent markup injection.
  function esc(s) {
    return String(s).replace(/&/g, '&amp;').replace(/</g, '&lt;')
      .replace(/>/g, '&gt;').replace(/"/g, '&quot;');
  }
  var calSum = cs.calibrationSummary || {};
  var calLine = calSum.totalSamples
    ? '<div style="font-size:11px;color:var(--text-muted);margin-bottom:6px">Last ' + calSum.totalSamples + ' samples: ' + (calSum.calibratedSamples || 0) + ' corrected via observer calibration, ' + (calSum.uncalibratedSamples || 0) + ' uncorrected (single-observer).</div>'
    : '';
  // Severity reason shown at the top of the panel.
  var skewVal = window.currentSkewValue(cs);
  var sampleCount = (cs.samples || []).length;
  var sevLabel = SKEW_SEVERITY_LABELS[cs.severity] || cs.severity;
  var reasonLine = '<div style="font-size:12px;margin-bottom:8px"><strong>Recent ' + sampleCount + ' adverts median ' + formatSkew(skewVal) + ' → ' + esc(sevLabel) + '</strong></div>';
  var hashBlocks = evidence.map(function(ev) {
    var shortHash = (ev.hash || '').substring(0, 8) + '…';
    var obsCount = ev.observers ? ev.observers.length : 0;
    var header = '<div style="font-weight:600;font-size:12px;margin-top:6px">Hash ' + esc(shortHash) + ' · ' + obsCount + ' observer' + (obsCount !== 1 ? 's' : '') + ' · median corrected: ' + formatSkew(ev.medianCorrectedSkewSec) + '</div>';
    var lines = (ev.observers || []).map(function(o) {
      var name = o.observerName || o.observerID;
      return '<div style="font-size:11px;padding-left:16px;font-family:var(--mono)">' +
        esc(name) + ' raw=' + formatSkew(o.rawSkewSec) + ' corrected=' + formatSkew(o.correctedSkewSec) + ' (observer offset ' + formatSkew(o.observerOffsetSec) + ')' +
        '</div>';
    }).join('');
    return header + lines;
  }).join('');
  return '<details style="margin-top:10px"><summary style="cursor:pointer;font-size:12px;color:var(--text-muted)">Evidence (' + evidence.length + ' hashes)</summary>' +
    '<div style="margin-top:6px;padding:8px;background:var(--bg-secondary);border-radius:6px">' +
    reasonLine + calLine + hashBlocks +
    '</div></details>';
}
async function loadClockSkewInto(container, pubkey) {
if (!container) return;
try {
@@ -841,7 +876,8 @@
'</div>' +
driftHtml +
(sparkHtml ? '<div class="skew-sparkline-wrap" style="margin-top:8px">' + sparkHtml + '<div style="font-size:10px;color:var(--text-muted)">Skew over time (' + (cs.samples || []).length + ' samples)</div></div>' : '') +
bimodalWarning;
bimodalWarning +
buildEvidencePanel(cs);
} catch (e) {
// Non-fatal — section stays hidden
}
+21 -3
View File
@@ -70,18 +70,24 @@
try {
destroyCharts();
chartDefaults();
const [obs, analytics] = await Promise.all([
const [obs, analytics, obsSkewArr] = await Promise.all([
api('/observers/' + encodeURIComponent(currentId)),
api('/observers/' + encodeURIComponent(currentId) + '/analytics?days=' + currentDays),
api('/observers/clock-skew', { ttl: 30000 }).catch(function() { return []; }),
]);
renderDetail(obs, analytics);
// Find this observer's calibration data.
var obsSkew = null;
(Array.isArray(obsSkewArr) ? obsSkewArr : []).forEach(function(s) {
if (s && s.observerID === currentId) obsSkew = s;
});
renderDetail(obs, analytics, obsSkew);
} catch (e) {
document.getElementById('obsDetailContent').innerHTML =
'<div class="text-muted" style="padding:40px">Error: ' + e.message + '</div>';
}
}
function renderDetail(obs, analytics) {
function renderDetail(obs, analytics, obsSkew) {
const el = document.getElementById('obsDetailContent');
if (!el) return;
@@ -154,6 +160,18 @@
<div class="mono" style="font-size:0.75em;color:var(--text-muted);margin-bottom:20px;word-break:break-all">
ID: ${obs.id}
</div>
${obsSkew && obsSkew.samples > 0 ? `
<div class="node-full-card skew-detail-section" style="margin-bottom:20px;padding:12px">
<h4 style="margin:0 0 6px"> Clock Offset</h4>
<div style="display:flex;align-items:center;gap:12px;flex-wrap:wrap">
<span style="font-size:18px;font-weight:700;font-family:var(--mono)">${formatSkew(obsSkew.offsetSec)}</span>
${renderSkewBadge(observerSkewSeverity(obsSkew.offsetSec), obsSkew.offsetSec)}
<span class="text-muted" style="font-size:12px">${obsSkew.samples} sample${obsSkew.samples !== 1 ? 's' : ''}</span>
</div>
<div style="font-size:12px;color:var(--text-muted);margin-top:8px;max-width:600px">
<strong>How this is computed:</strong> when this observer and another observer see the same packet, we compare their receive timestamps. The median deviation across all multi-observer packets is this observer's offset.
</div>
</div>` : ''}
<div class="obs-charts" style="display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));gap:16px">
<div class="chart-card" style="padding:12px">
<h3 style="margin:0 0 8px;font-size:0.95em">Packets Over Time</h3>
+17 -2
View File
@@ -3,6 +3,7 @@
(function () {
let observers = [];
let obsSkewMap = {}; // observerID → {offsetSec, samples}
let wsHandler = null;
let refreshTimer = null;
let regionChangeHandler = null;
@@ -51,12 +52,20 @@
if (regionChangeHandler) RegionFilter.offChange(regionChangeHandler);
regionChangeHandler = null;
observers = [];
obsSkewMap = {};
}
async function loadObservers() {
try {
const data = await api('/observers', { ttl: CLIENT_TTL.observers });
const [data, skewData] = await Promise.all([
api('/observers', { ttl: CLIENT_TTL.observers }),
api('/observers/clock-skew', { ttl: 30000 }).catch(function() { return []; })
]);
observers = data.observers || [];
obsSkewMap = {};
(Array.isArray(skewData) ? skewData : []).forEach(function(s) {
if (s && s.observerID) obsSkewMap[s.observerID] = s;
});
render();
} catch (e) {
document.getElementById('obsContent').innerHTML =
@@ -124,7 +133,7 @@
<caption class="sr-only">Observer status and statistics</caption>
<thead><tr>
<th scope="col">Status</th><th scope="col">Name</th><th scope="col">Region</th><th scope="col">Last Seen</th>
<th scope="col">Packets</th><th scope="col">Packets/Hour</th><th scope="col">Uptime</th>
<th scope="col">Packets</th><th scope="col">Packets/Hour</th><th scope="col">Clock Offset</th><th scope="col">Uptime</th>
</tr></thead>
<tbody>${filtered.map(o => {
const h = healthStatus(o.last_seen);
@@ -136,6 +145,12 @@
<td>${timeAgo(o.last_seen)}</td>
<td>${(o.packet_count || 0).toLocaleString()}</td>
<td>${sparkBar(o.packetsLastHour || 0, maxPktsHr)}</td>
<td>${(function() {
var sk = obsSkewMap[o.id];
if (!sk || sk.samples == null || sk.samples === 0) return '<span class="text-muted">—</span>';
var sev = observerSkewSeverity(sk.offsetSec);
return renderSkewBadge(sev, sk.offsetSec) + ' <span class="text-muted" title="Computed from ' + sk.samples + ' multi-observer packets. Positive = observer ahead of consensus.">(' + sk.samples + ')</span>';
})()}</td>
<td>${uptimeStr(o.first_seen)}</td>
</tr>`;
}).join('')}</tbody>
+6
View File
@@ -455,6 +455,12 @@
return '<span class="' + cls + '" title="Clock skew: ' + window.formatSkew(skewSec) + ' (' + (SKEW_SEVERITY_LABELS[severity] || severity) + ')">' + label + '</span>';
};
/**
 * Compute severity for an observer's clock offset (seconds).
 * Thresholds on the absolute offset: ≥1h → 'critical', ≥5min → 'warning',
 * otherwise 'ok'.
 */
window.observerSkewSeverity = function (offsetSec) {
  var magnitude = Math.abs(offsetSec);
  if (magnitude >= 3600) return 'critical';
  if (magnitude >= 300) return 'warning';
  return 'ok';
};
/** Render a skew sparkline SVG (inline, word-sized) */
window.renderSkewSparkline = function(samples, w, h) {
w = w || 120; h = h || 24;