mirror of
https://github.com/Kpa-clawbot/meshcore-analyzer.git
synced 2026-03-30 14:45:52 +00:00
Modularize audio: engine + swappable voice modules
audio.js is now the core engine (context, routing, voice mgmt).
Voice modules register via MeshAudio.registerVoice(name, module).
Each module exports { name, play(ctx, master, parsed, opts) }.
Voice selector dropdown appears in audio controls.
Voices persist in localStorage. Adding a new voice = new file +
script tag. Previous voices are never lost.
v1 "constellation" extracted as audio-v1-constellation.js.
This commit is contained in:
126
public/audio-v1-constellation.js
Normal file
126
public/audio-v1-constellation.js
Normal file
@@ -0,0 +1,126 @@
|
||||
// Voice v1: "Constellation" — melodic packet sonification
// Original voice: type-based instruments, scale-quantized melody from payload bytes,
// byte-driven note duration and spacing, hop-based filter, observation chord voicing.

(function () {
  'use strict';

  const { buildScale, midiToFreq, mapRange, quantizeToScale } = MeshAudio.helpers;

  // Scales per payload type
  const SCALES = {
    ADVERT: buildScale([0, 2, 4, 7, 9], 48),         // C major pentatonic
    GRP_TXT: buildScale([0, 3, 5, 7, 10], 45),       // A minor pentatonic
    TXT_MSG: buildScale([0, 2, 3, 5, 7, 8, 10], 40), // E natural minor
    TRACE: buildScale([0, 2, 4, 6, 8, 10], 50),      // D whole tone
  };
  const DEFAULT_SCALE = SCALES.ADVERT;

  // Synth ADSR envelopes per type
  const SYNTHS = {
    ADVERT: { type: 'triangle', attack: 0.02, decay: 0.3, sustain: 0.4, release: 0.5 },
    GRP_TXT: { type: 'sine', attack: 0.005, decay: 0.15, sustain: 0.1, release: 0.2 },
    TXT_MSG: { type: 'triangle', attack: 0.01, decay: 0.2, sustain: 0.3, release: 0.4 },
    TRACE: { type: 'sine', attack: 0.05, decay: 0.4, sustain: 0.5, release: 0.8 },
  };
  const DEFAULT_SYNTH = SYNTHS.ADVERT;

  /**
   * Schedule one packet as a short melodic phrase on the shared AudioContext.
   *
   * @param {AudioContext} audioCtx - running audio context
   * @param {GainNode} masterGain - engine master gain to route into
   * @param {Object} parsed - parsed packet from the engine (payloadBytes, typeName, …)
   * @param {Object} opts - engine options; only `tempoMultiplier` is read here
   * @returns {number} seconds from now until the last note (incl. release) ends
   */
  function play(audioCtx, masterGain, parsed, opts) {
    const { payloadBytes, typeName, hopCount, obsCount, payload, hops } = parsed;

    // Guard: the engine normally filters empty payloads, but stay safe when
    // called directly — otherwise sampled bytes would be undefined (NaN freqs).
    if (!payloadBytes || payloadBytes.length === 0) return 0;

    const tm = opts.tempoMultiplier;

    const scale = SCALES[typeName] || DEFAULT_SCALE;
    const synthConfig = SYNTHS[typeName] || DEFAULT_SYNTH;

    // Sample sqrt(len) bytes evenly (2-10 notes per packet)
    const noteCount = Math.max(2, Math.min(10, Math.ceil(Math.sqrt(payloadBytes.length))));
    const sampledBytes = [];
    for (let i = 0; i < noteCount; i++) {
      const idx = Math.floor((i / noteCount) * payloadBytes.length);
      sampledBytes.push(payloadBytes[idx]);
    }

    // Pan from longitude when the payload carries a position; otherwise a
    // small random pan for spatial interest on multi-hop packets.
    let panValue = 0;
    if (payload.lat !== undefined && payload.lon !== undefined) {
      panValue = Math.max(-1, Math.min(1, mapRange(payload.lon, -125, -65, -1, 1)));
    } else if (hops.length > 0) {
      panValue = (Math.random() - 0.5) * 0.6;
    }

    // Filter cutoff from hop count: few hops = bright (8 kHz), many = muffled (800 Hz)
    const filterFreq = mapRange(Math.min(hopCount, 10), 1, 10, 8000, 800);

    // Volume grows with observation count (capped); chord voicing up to 4 voices
    const volume = Math.min(0.5, 0.15 + (obsCount - 1) * 0.03);
    const voiceCount = Math.min(obsCount, 4);

    // Shared routing: oscillators -> filter -> panner -> master
    const filter = audioCtx.createBiquadFilter();
    filter.type = 'lowpass';
    filter.frequency.value = filterFreq;
    filter.Q.value = 1;

    const panner = audioCtx.createStereoPanner();
    panner.pan.value = panValue;

    filter.connect(panner);
    panner.connect(masterGain);

    let timeOffset = audioCtx.currentTime + 0.01; // tiny offset avoids clicks
    let lastNoteEnd = timeOffset;

    for (let i = 0; i < sampledBytes.length; i++) {
      const byte = sampledBytes[i];
      const freq = midiToFreq(quantizeToScale(byte, scale));
      // Byte value drives duration: low = staccato, high = sustained
      const duration = mapRange(byte, 0, 255, 0.05, 0.4) * tm;

      // Gap to the next note from the delta between adjacent sampled bytes
      let gap = 0.05 * tm;
      if (i < sampledBytes.length - 1) {
        const delta = Math.abs(sampledBytes[i + 1] - byte);
        gap = mapRange(delta, 0, 255, 0.03, 0.3) * tm;
      }

      const noteStart = timeOffset;
      const noteEnd = noteStart + duration;

      // Stack slightly-detuned oscillators for chord voicing (one per observation)
      for (let v = 0; v < voiceCount; v++) {
        const detune = v === 0 ? 0 : (v % 2 === 0 ? 1 : -1) * (v * 7); // ±7, ±14 cents
        const osc = audioCtx.createOscillator();
        const envGain = audioCtx.createGain();

        osc.type = synthConfig.type;
        osc.frequency.value = freq;
        osc.detune.value = detune;

        // ADSR envelope; total volume split evenly across stacked voices
        const { attack: a, decay: d, sustain: s, release: r } = synthConfig;
        const voiceVol = volume / voiceCount;

        envGain.gain.setValueAtTime(0, noteStart);
        envGain.gain.linearRampToValueAtTime(voiceVol, noteStart + a);
        envGain.gain.linearRampToValueAtTime(voiceVol * s, noteStart + a + d);
        envGain.gain.setValueAtTime(voiceVol * s, noteEnd);
        envGain.gain.linearRampToValueAtTime(0.001, noteEnd + r);

        osc.connect(envGain);
        envGain.connect(filter);
        osc.start(noteStart);
        osc.stop(noteEnd + r + 0.01);
        osc.onended = () => { osc.disconnect(); envGain.disconnect(); };
      }

      timeOffset = noteEnd + gap;
      lastNoteEnd = noteEnd + (synthConfig.release || 0.2);
    }

    // Cleanup shared nodes once everything has finished sounding.
    // Clamp to >= 0 in case currentTime advanced past the scheduled end.
    const cleanupMs = Math.max(0, (lastNoteEnd - audioCtx.currentTime + 0.5) * 1000);
    setTimeout(() => {
      try { filter.disconnect(); panner.disconnect(); } catch (e) {}
    }, cleanupMs);

    return lastNoteEnd - audioCtx.currentTime;
  }

  MeshAudio.registerVoice('constellation', { name: 'constellation', play });
})();
|
||||
293
public/audio.js
293
public/audio.js
@@ -1,47 +1,22 @@
|
||||
// Mesh Audio Sonification — public/audio.js
|
||||
// Turns raw packet bytes into generative music per AUDIO-PLAN.md
|
||||
// Mesh Audio Engine — public/audio.js
|
||||
// Core audio infrastructure + swappable voice modules
|
||||
// Each voice module is a separate file (audio-v1.js, audio-v2.js, etc.)
|
||||
|
||||
(function () {
|
||||
'use strict';
|
||||
|
||||
// === Engine State ===
let audioEnabled = false; // master on/off switch (persisted in localStorage)
let audioCtx = null;      // lazily-created AudioContext (see initAudio)
let masterGain = null;    // master volume node; default gain 0.3
let bpm = 120;            // tempo; 120 BPM = 1.0x note durations
let activeVoices = 0;     // packets currently sounding
const MAX_VOICES = 12;    // hard cap; packets beyond this are dropped
let currentVoice = null;  // active voice module
|
||||
|
||||
// === Scales (MIDI note offsets from root) ===
// Pentatonic / modal scales spanning multiple octaves, keyed by payload type.
const SCALES = {
  ADVERT: buildScale([0, 2, 4, 7, 9], 48),          // C major pentatonic, root C3
  GRP_TXT: buildScale([0, 3, 5, 7, 10], 45),        // A minor pentatonic, root A2
  TXT_MSG: buildScale([0, 2, 3, 5, 7, 8, 10], 40),  // E natural minor, root E2
  TRACE: buildScale([0, 2, 4, 6, 8, 10], 50),       // D whole tone, root D3
};

// Fallback scale for unknown payload types.
const DEFAULT_SCALE = SCALES.ADVERT;
|
||||
|
||||
// === Synth configs per type ===
// Oscillator waveform plus ADSR envelope (seconds), keyed by payload type.
const SYNTH_CONFIGS = {
  ADVERT:  { type: 'triangle', attack: 0.02,  decay: 0.3,  sustain: 0.4, release: 0.5 }, // bell/pad
  GRP_TXT: { type: 'sine',     attack: 0.005, decay: 0.15, sustain: 0.1, release: 0.2 }, // marimba/pluck
  TXT_MSG: { type: 'triangle', attack: 0.01,  decay: 0.2,  sustain: 0.3, release: 0.4 }, // piano-like
  TRACE:   { type: 'sine',     attack: 0.05,  decay: 0.4,  sustain: 0.5, release: 0.8 }, // ethereal
};

// Fallback config for unknown payload types.
const DEFAULT_SYNTH = SYNTH_CONFIGS.ADVERT;
|
||||
|
||||
// === Helpers ===
|
||||
// === Shared Helpers (available to voice modules) ===
|
||||
|
||||
function buildScale(intervals, rootMidi) {
|
||||
// Build scale across 3 octaves
|
||||
const notes = [];
|
||||
for (let oct = 0; oct < 3; oct++) {
|
||||
for (const interval of intervals) {
|
||||
@@ -60,180 +35,105 @@
|
||||
}
|
||||
|
||||
// Project a byte (0-255) onto a note from the scale, clamping at the top end.
function quantizeToScale(byteVal, scale) {
  const slot = Math.min(Math.floor((byteVal / 256) * scale.length), scale.length - 1);
  return scale[slot];
}
|
||||
|
||||
// Note durations scale inversely with tempo: 120 BPM = 1.0x, 240 BPM = 0.5x.
function tempoMultiplier() {
  const referenceBpm = 120;
  return referenceBpm / bpm;
}
|
||||
|
||||
/**
 * Extract the raw bytes and musical metadata that voice modules consume.
 * Header bytes are the first 3; the remainder drives the melody.
 * Returns null when the packet has no usable raw hex (fewer than 3 bytes).
 */
function parsePacketBytes(pkt) {
  const rawHex = pkt.raw || pkt.raw_hex || (pkt.packet && pkt.packet.raw_hex) || '';
  if (!rawHex || rawHex.length < 6) return null;

  // Decode the hex string two characters at a time, skipping malformed pairs.
  const allBytes = [];
  for (let pos = 0; pos < rawHex.length; pos += 2) {
    const value = Number.parseInt(rawHex.substring(pos, pos + 2), 16);
    if (!Number.isNaN(value)) allBytes.push(value);
  }
  if (allBytes.length < 3) return null;

  const decoded = pkt.decoded || {};
  const header = decoded.header || {};
  const hops = decoded.path?.hops || [];

  return {
    allBytes,
    headerBytes: allBytes.slice(0, 3),
    payloadBytes: allBytes.slice(3),
    typeName: header.payloadTypeName || 'UNKNOWN',
    hopCount: Math.max(1, hops.length), // direct packets still count as 1 hop
    obsCount: pkt.observation_count || (pkt.packet && pkt.packet.observation_count) || 1,
    payload: decoded.payload || {},
    hops,
  };
}
|
||||
|
||||
// === Engine: Init ===

/**
 * Lazily create the AudioContext and master gain chain.
 * Safe to call repeatedly: on re-entry it only resumes a suspended context
 * (browsers suspend contexts created before a user gesture).
 */
function initAudio() {
  if (audioCtx) {
    // Already initialized — just resume if the browser suspended us.
    if (audioCtx.state === 'suspended') audioCtx.resume();
    return;
  }
  audioCtx = new (window.AudioContext || window.webkitAudioContext)();
  masterGain = audioCtx.createGain();
  masterGain.gain.value = 0.3;
  masterGain.connect(audioCtx.destination);
}
|
||||
|
||||
// === Engine: Sonify ===

/**
 * Sonify one packet by delegating to the active voice module.
 * Drops the packet silently when audio is off, no voice is selected,
 * the packet has no payload bytes, or the voice cap is reached.
 */
function sonifyPacket(pkt) {
  if (!audioEnabled || !audioCtx || !currentVoice) return;
  if (audioCtx.state === 'suspended') audioCtx.resume();
  if (activeVoices >= MAX_VOICES) return; // voice stealing: just drop

  const parsed = parsePacketBytes(pkt);
  if (!parsed || parsed.payloadBytes.length === 0) return;

  activeVoices++;
  try {
    // Voice returns its scheduled duration in seconds (may be undefined).
    const duration = currentVoice.play(audioCtx, masterGain, parsed, {
      bpm,
      tempoMultiplier: tempoMultiplier(),
    });

    // Release the voice slot after the estimated duration plus a safety margin.
    const releaseMs = (duration || 3) * 1000 + 500;
    setTimeout(() => { activeVoices = Math.max(0, activeVoices - 1); }, releaseMs);
  } catch (e) {
    // A buggy voice module must not leak its slot or kill the caller.
    activeVoices = Math.max(0, activeVoices - 1);
    console.error('[audio] voice error:', e);
  }
}
|
||||
|
||||
// === Voice Registration ===

/**
 * Register a voice module under a name.
 * voiceModule contract: { name, play(audioCtx, masterGain, parsed, opts) -> durationSec }
 * The first voice registered becomes the active one by default, so previously
 * shipped voices are never lost when new ones are added.
 */
function registerVoice(name, voiceModule) {
  window._meshAudioVoices = window._meshAudioVoices || {};
  window._meshAudioVoices[name] = voiceModule;
  if (!currentVoice) currentVoice = voiceModule;
}
|
||||
|
||||
/**
 * Activate a registered voice by name and persist the choice.
 * @returns {boolean} true when the voice exists and was activated.
 */
function setVoice(name) {
  const registry = window._meshAudioVoices;
  if (!registry || !registry[name]) return false;
  currentVoice = registry[name];
  localStorage.setItem('live-audio-voice', name);
  return true;
}
|
||||
|
||||
// Name of the active voice module, or null when none is registered yet.
function getVoiceName() {
  if (!currentVoice) return null;
  return currentVoice.name;
}
|
||||
|
||||
// Names of every registered voice module (empty array before any register).
function getVoiceNames() {
  const registry = window._meshAudioVoices || {};
  return Object.keys(registry);
}
|
||||
|
||||
// === Public API ===
|
||||
@@ -244,29 +144,22 @@
|
||||
localStorage.setItem('live-audio-enabled', on);
|
||||
}
|
||||
|
||||
// Whether sonification is currently enabled.
function isEnabled() { return audioEnabled; }

/**
 * Set the tempo, clamped to 40-300 BPM, and persist it.
 */
function setBPM(val) {
  bpm = Math.max(40, Math.min(300, val));
  localStorage.setItem('live-audio-bpm', bpm);
}

// Current tempo in BPM.
function getBPM() { return bpm; }

/**
 * Set the master volume (0-1, clamped) and persist it.
 * The gain node is only touched after initAudio() has created it.
 */
function setVolume(val) {
  if (masterGain) masterGain.gain.value = Math.max(0, Math.min(1, val));
  localStorage.setItem('live-audio-volume', val);
}

// Current master volume; 0.3 default before the context exists.
function getVolume() { return masterGain ? masterGain.gain.value : 0.3; }
|
||||
|
||||
// Restore from localStorage
|
||||
function restore() {
|
||||
const saved = localStorage.getItem('live-audio-enabled');
|
||||
if (saved === 'true') audioEnabled = true;
|
||||
@@ -277,17 +170,19 @@
|
||||
initAudio();
|
||||
if (masterGain) masterGain.gain.value = parseFloat(savedVol) || 0.3;
|
||||
}
|
||||
const savedVoice = localStorage.getItem('live-audio-voice');
|
||||
if (savedVoice) setVoice(savedVoice);
|
||||
}
|
||||
|
||||
// Export
|
||||
// Export engine + helpers for voice modules
|
||||
window.MeshAudio = {
|
||||
sonifyPacket,
|
||||
setEnabled,
|
||||
isEnabled,
|
||||
setBPM,
|
||||
getBPM,
|
||||
setVolume,
|
||||
getVolume,
|
||||
setEnabled, isEnabled,
|
||||
setBPM, getBPM,
|
||||
setVolume, getVolume,
|
||||
registerVoice, setVoice, getVoiceName, getVoiceNames,
|
||||
restore,
|
||||
// Helpers for voice modules
|
||||
helpers: { buildScale, midiToFreq, mapRange, quantizeToScale },
|
||||
};
|
||||
})();
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
<meta name="twitter:title" content="MeshCore Analyzer">
|
||||
<meta name="twitter:description" content="Real-time MeshCore LoRa mesh network analyzer — live packet visualization, node tracking, channel decryption, and route analysis.">
|
||||
<meta name="twitter:image" content="https://raw.githubusercontent.com/Kpa-clawbot/meshcore-analyzer/master/public/og-image.png">
|
||||
<link rel="stylesheet" href="style.css?v=1774172118">
|
||||
<link rel="stylesheet" href="style.css?v=1774199178">
|
||||
<link rel="stylesheet" href="home.css">
|
||||
<link rel="stylesheet" href="live.css?v=1774058575">
|
||||
<link rel="stylesheet" href="https://unpkg.com/leaflet@1.9.4/dist/leaflet.css"
|
||||
@@ -90,8 +90,9 @@
|
||||
<script src="nodes.js?v=1774126708" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="traces.js?v=1774135052" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="analytics.js?v=1774126708" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="audio.js?v=1774172118" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="live.js?v=1774172118" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="audio.js?v=1774199178" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="audio-v1-constellation.js?v=1774199178" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="live.js?v=1774199178" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="observers.js?v=1774290000" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="observer-detail.js?v=1774028201" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
<script src="node-analytics.js?v=1774126708" onerror="console.error('Failed to load:', this.src)"></script>
|
||||
|
||||
@@ -645,6 +645,7 @@
|
||||
<span id="favDesc" class="sr-only">Show only favorited and claimed nodes</span>
|
||||
</div>
|
||||
<div class="audio-controls hidden" id="audioControls">
|
||||
<label class="audio-slider-label">Voice <select id="audioVoiceSelect" class="audio-voice-select"></select></label>
|
||||
<label class="audio-slider-label">BPM <input type="range" id="audioBpmSlider" min="40" max="300" value="120" class="audio-slider"><span id="audioBpmVal">120</span></label>
|
||||
<label class="audio-slider-label">Vol <input type="range" id="audioVolSlider" min="0" max="100" value="30" class="audio-slider"><span id="audioVolVal">30</span></label>
|
||||
</div>
|
||||
@@ -847,6 +848,17 @@
|
||||
bpmVal.textContent = MeshAudio.getBPM();
|
||||
volSlider.value = Math.round(MeshAudio.getVolume() * 100);
|
||||
volVal.textContent = Math.round(MeshAudio.getVolume() * 100);
|
||||
|
||||
// Populate the voice selector with every registered voice module.
const voiceSelect = document.getElementById('audioVoiceSelect');
const voiceNames = MeshAudio.getVoiceNames();
for (const voiceName of voiceNames) {
  const option = document.createElement('option');
  option.value = voiceName;
  option.textContent = voiceName;
  voiceSelect.appendChild(option);
}
// Reflect the active voice (falling back to the first registered one),
// and switch voices when the user picks a different option.
voiceSelect.value = MeshAudio.getVoiceName() || voiceNames[0] || '';
voiceSelect.addEventListener('change', (e) => MeshAudio.setVoice(e.target.value));
|
||||
}
|
||||
|
||||
audioToggle.addEventListener('change', (e) => {
|
||||
|
||||
@@ -1624,3 +1624,19 @@ tr[data-hops]:hover { background: rgba(59,130,246,0.1); }
|
||||
.matrix-theme .audio-controls label,
|
||||
.matrix-theme .audio-controls span { color: #00ff41 !important; }
|
||||
.matrix-theme .audio-slider { accent-color: #00ff41; }
|
||||
|
||||
/* Audio voice selector dropdown in the live-view audio controls. */
.audio-voice-select {
  background: var(--bg-secondary, #1f2937);
  color: var(--text-primary, #e5e7eb);
  border: 1px solid var(--border, #374151);
  border-radius: 4px;
  padding: 2px 4px;
  font-size: 11px;
  cursor: pointer;
}

/* Matrix theme override: green-on-black to match the other audio controls. */
.matrix-theme .audio-voice-select {
  background: #001a00 !important;
  color: #00ff41 !important;
  border-color: #00ff4130 !important;
}
|
||||
|
||||
Reference in New Issue
Block a user