chore: bump version to 4.4.0; refactor announce limits and add new community interfaces management

This commit is contained in:
Ivan
2026-03-24 00:40:08 +03:00
parent cfcc78868c
commit daf48ec406
24 changed files with 865 additions and 211 deletions
+1 -1
View File
@@ -1,3 +1,3 @@
"""Reticulum MeshChatX - A mesh network communications app."""
__version__ = "4.3.1"
__version__ = "4.4.0"
+171 -36
View File
@@ -5,6 +5,7 @@ import asyncio
import atexit
import base64
import configparser
import contextlib
import copy
import gc
import hashlib
@@ -72,7 +73,9 @@ from meshchatx.src.backend.meshchat_utils import (
convert_db_favourite_to_dict,
convert_propagation_node_state_to_string,
has_attachments,
hex_identifier_to_bytes,
message_fields_have_attachments,
normalize_hex_identifier,
parse_bool_query_param,
parse_lxmf_display_name,
parse_lxmf_propagation_node_app_data,
@@ -1564,6 +1567,24 @@ class ReticulumMeshChat:
)
]
def _default_announce_fetch_limit(self, aspect):
    """Return the default page size for announce queries of *aspect*.

    Falls back to 500 when no context/config is available, the aspect is
    unknown, or the configured value is unset or non-positive; otherwise
    the configured value clamped to 100,000. ``lxst.telephony`` shares
    the ``lxmf.delivery`` setting.
    """
    fallback = 500
    ctx = self.current_context
    if not ctx or not ctx.config:
        return fallback
    config = ctx.config
    aspect_settings = {
        "lxmf.delivery": config.announce_fetch_limit_lxmf_delivery,
        "nomadnetwork.node": config.announce_fetch_limit_nomadnetwork_node,
        "lxmf.propagation": config.announce_fetch_limit_lxmf_propagation,
        "lxst.telephony": config.announce_fetch_limit_lxmf_delivery,
    }
    setting = aspect_settings.get(aspect)
    if setting is None:
        return fallback
    configured = setting.get()
    if configured is None or configured < 1:
        return fallback
    return min(int(configured), 100_000)
def get_lxst_version(self) -> str:
return self.get_package_version("lxst", getattr(LXST, "__version__", "unknown"))
@@ -2327,10 +2348,24 @@ class ReticulumMeshChat:
# web server has shutdown, likely ctrl+c, but if we don't do the following, the script never exits
async def shutdown(self, app):
# stop page nodes before tearing down reticulum
if hasattr(self, "page_node_manager"):
self.page_node_manager.teardown()
for identity_hash in list(self.contexts.keys()):
ctx = self.contexts.get(identity_hash)
if ctx is None:
continue
try:
ctx.teardown()
except Exception: # noqa: S110
pass
self.contexts.clear()
self.current_context = None
if hasattr(self, "_health_monitor") and self._health_monitor is not None:
with contextlib.suppress(Exception):
self._health_monitor.stop()
# force close websocket clients
for websocket_client in self.websocket_clients:
try:
@@ -3206,10 +3241,58 @@ class ReticulumMeshChat:
InterfaceEditor.update_value(interface_details, data, "kiss_framing")
InterfaceEditor.update_value(interface_details, data, "i2p_tunneled")
if interface_type == "BackboneInterface":
remote = data.get("remote") or data.get("target_host")
if remote is None or str(remote).strip() == "":
return web.json_response(
{
"message": "Remote host is required",
},
status=422,
)
interface_target_port = data.get("target_port")
if interface_target_port is None or interface_target_port == "":
return web.json_response(
{
"message": "Target Port is required",
},
status=422,
)
transport_identity = data.get("transport_identity")
if transport_identity is None or str(transport_identity).strip() == "":
return web.json_response(
{
"message": "Transport identity is required",
},
status=422,
)
interface_details["remote"] = str(remote).strip()
interface_details["target_port"] = interface_target_port
interface_details["transport_identity"] = str(
transport_identity
).strip()
# handle I2P interface
if interface_type == "I2PInterface":
interface_details["connectable"] = "True"
InterfaceEditor.update_value(interface_details, data, "peers")
peers = data.get("peers")
cleaned_peers: list[str] = []
if isinstance(peers, list):
cleaned_peers = [str(p).strip() for p in peers if str(p).strip()]
elif peers is not None and str(peers).strip() != "":
cleaned_peers = [
s.strip()
for s in str(peers).replace(",", " ").split()
if s.strip()
]
if not cleaned_peers:
return web.json_response(
{
"message": "At least one I2P peer is required",
},
status=422,
)
interface_details["peers"] = cleaned_peers
# handle tcp server interface
if interface_type == "TCPServerInterface":
@@ -4794,6 +4877,13 @@ class ReticulumMeshChat:
whitelist_patterns,
blacklist_patterns,
)
max_disc = 500
if self.current_context and self.current_context.config:
mv = self.current_context.config.discovered_interfaces_max_return.get()
if mv is not None and mv > 0:
max_disc = min(int(mv), 50_000)
if len(interfaces) > max_disc:
interfaces = interfaces[:max_disc]
active = []
try:
if hasattr(self, "reticulum") and self.reticulum:
@@ -4853,6 +4943,9 @@ class ReticulumMeshChat:
except Exception as e:
logger.debug(f"Failed to get interface stats: {e}")
if len(active) > max_disc:
active = active[:max_disc]
def to_jsonable(obj):
if isinstance(obj, bytes):
return obj.hex()
@@ -6025,7 +6118,7 @@ class ReticulumMeshChat:
try:
limit = request.query.get("limit")
limit = int(limit) if limit is not None else None
limit = int(limit) if limit is not None and limit != "" else None
except ValueError:
limit = None
@@ -6035,6 +6128,15 @@ class ReticulumMeshChat:
except ValueError:
offset = 0
if not search_query and limit is None:
limit = self._default_announce_fetch_limit(aspect)
search_max = 2000
if self.current_context and self.current_context.config:
sm = self.current_context.config.announce_search_max_fetch.get()
if sm is not None and sm > 0:
search_max = min(int(sm), 10_000)
include_blocked = (
request.query.get("include_blocked", "false").lower() == "true"
)
@@ -6046,7 +6148,10 @@ class ReticulumMeshChat:
)
blocked_identity_hashes = [b["destination_hash"] for b in blocked]
db_limit = limit if not search_query else None
if search_query:
db_limit = min(search_max, limit) if limit is not None else search_max
else:
db_limit = limit
db_offset = offset if not search_query else 0
results = await asyncio.to_thread(
@@ -6074,8 +6179,6 @@ class ReticulumMeshChat:
blocked_identity_hashes=blocked_identity_hashes,
)
# ... rest of processing ...
# pre-fetch icons and other data to avoid N+1 queries in convert_db_announce_to_dict
other_user_hashes = [r["destination_hash"] for r in results]
user_icons = {}
@@ -9531,21 +9634,29 @@ class ReticulumMeshChat:
value = self._parse_bool(data["show_suggested_community_interfaces"])
self.config.show_suggested_community_interfaces.set(value)
for key in (
"announce_limit_lxmf_delivery",
"announce_limit_nomadnetwork_node",
"announce_limit_lxmf_propagation",
):
if key in data:
val = data[key]
if val is None or val == "":
getattr(self.config, key).set(None)
else:
try:
v = int(val)
getattr(self.config, key).set(max(0, v) if v >= 0 else None)
except (TypeError, ValueError):
getattr(self.config, key).set(None)
_announce_int_fields = [
("announce_max_stored_lxmf_delivery", 1, 1_000_000),
("announce_max_stored_nomadnetwork_node", 1, 1_000_000),
("announce_max_stored_lxmf_propagation", 1, 1_000_000),
("announce_fetch_limit_lxmf_delivery", 1, 100_000),
("announce_fetch_limit_nomadnetwork_node", 1, 100_000),
("announce_fetch_limit_lxmf_propagation", 1, 100_000),
("announce_search_max_fetch", 100, 10_000),
("discovered_interfaces_max_return", 1, 50_000),
]
for key, lo, hi in _announce_int_fields:
if key not in data:
continue
val = data[key]
if val is None or val == "":
getattr(self.config, key).set(None)
continue
try:
v = int(val)
v = max(lo, min(hi, v))
getattr(self.config, key).set(v)
except (TypeError, ValueError):
getattr(self.config, key).set(None)
if "lxmf_preferred_propagation_node_destination_hash" in data:
# update config value
@@ -10914,9 +11025,14 @@ class ReticulumMeshChat:
"desktop_open_calls_in_separate_window": ctx.config.desktop_open_calls_in_separate_window.get(),
"desktop_hardware_acceleration_enabled": ctx.config.desktop_hardware_acceleration_enabled.get(),
"blackhole_integration_enabled": ctx.config.blackhole_integration_enabled.get(),
"announce_limit_lxmf_delivery": ctx.config.announce_limit_lxmf_delivery.get(),
"announce_limit_nomadnetwork_node": ctx.config.announce_limit_nomadnetwork_node.get(),
"announce_limit_lxmf_propagation": ctx.config.announce_limit_lxmf_propagation.get(),
"announce_max_stored_lxmf_delivery": ctx.config.announce_max_stored_lxmf_delivery.get(),
"announce_max_stored_nomadnetwork_node": ctx.config.announce_max_stored_nomadnetwork_node.get(),
"announce_max_stored_lxmf_propagation": ctx.config.announce_max_stored_lxmf_propagation.get(),
"announce_fetch_limit_lxmf_delivery": ctx.config.announce_fetch_limit_lxmf_delivery.get(),
"announce_fetch_limit_nomadnetwork_node": ctx.config.announce_fetch_limit_nomadnetwork_node.get(),
"announce_fetch_limit_lxmf_propagation": ctx.config.announce_fetch_limit_lxmf_propagation.get(),
"announce_search_max_fetch": ctx.config.announce_search_max_fetch.get(),
"discovered_interfaces_max_return": ctx.config.discovered_interfaces_max_return.get(),
"csp_extra_connect_src": ctx.config.csp_extra_connect_src.get(),
"csp_extra_img_src": ctx.config.csp_extra_img_src.get(),
"csp_extra_frame_src": ctx.config.csp_extra_frame_src.get(),
@@ -10933,6 +11049,7 @@ class ReticulumMeshChat:
# try and get a name for the provided identity hash
def get_name_for_identity_hash(self, identity_hash: str):
id_norm = normalize_hex_identifier(identity_hash) if identity_hash else ""
# 1. try recall identity and calculate lxmf destination hash
identity = self.recall_identity(identity_hash)
if identity is not None:
@@ -10960,15 +11077,17 @@ class ReticulumMeshChat:
# 2. if identity recall failed, or we couldn't find a name for the calculated hash
# try to look up an lxmf.delivery announce with this identity_hash in the database
search = id_norm if len(id_norm) >= 8 else identity_hash
announces = self.database.announces.get_filtered_announces(
aspect="lxmf.delivery",
search_term=identity_hash,
search_term=search,
)
if announces:
for announce in announces:
# search_term matches destination_hash OR identity_hash in the DAO.
# We want to be sure it's the identity_hash we're looking for.
if announce["identity_hash"] == identity_hash:
ann_id = announce.get("identity_hash") or ""
if ann_id and normalize_hex_identifier(ann_id) == id_norm:
lxmf_destination_hash = announce["destination_hash"]
# check custom name for this hash
@@ -10992,30 +11111,36 @@ class ReticulumMeshChat:
# recall identity from reticulum or database
def get_lxmf_destination_hash_for_identity_hash(self, identity_hash: str):
id_norm = normalize_hex_identifier(identity_hash) if identity_hash else ""
identity = self.recall_identity(identity_hash)
if identity is not None:
return RNS.Destination.hash(identity, "lxmf", "delivery").hex()
# fallback to announces
search = id_norm if len(id_norm) >= 8 else identity_hash
announces = self.database.announces.get_filtered_announces(
aspect="lxmf.delivery",
search_term=identity_hash,
search_term=search,
)
if announces:
for announce in announces:
if announce["identity_hash"] == identity_hash:
ann_id = announce.get("identity_hash") or ""
if ann_id and normalize_hex_identifier(ann_id) == id_norm:
return announce["destination_hash"]
return None
def get_lxst_telephony_hash_for_identity_hash(self, identity_hash: str):
id_norm = normalize_hex_identifier(identity_hash) if identity_hash else ""
# Primary: use announces table for lxst.telephony aspect
search = id_norm if len(id_norm) >= 8 else identity_hash
announces = self.database.announces.get_filtered_announces(
aspect="lxst.telephony",
search_term=identity_hash,
search_term=search,
)
if announces:
for announce in announces:
if announce["identity_hash"] == identity_hash:
ann_id = announce.get("identity_hash") or ""
if ann_id and normalize_hex_identifier(ann_id) == id_norm:
return announce.get("destination_hash")
# Fallback: derive from identity if available (same identity, different aspect)
@@ -11029,22 +11154,32 @@ class ReticulumMeshChat:
def recall_identity(self, hash_hex: str) -> RNS.Identity | None:
try:
if not hash_hex or not isinstance(hash_hex, str):
return None
stripped = hash_hex.strip()
canonical = normalize_hex_identifier(stripped)
# 1. try reticulum recall (works for both identity and destination hashes)
hash_bytes = bytes.fromhex(hash_hex)
identity = RNS.Identity.recall(hash_bytes)
if identity:
return identity
hash_bytes = hex_identifier_to_bytes(stripped)
if hash_bytes:
identity = RNS.Identity.recall(hash_bytes)
if identity:
return identity
# 2. try database lookup
# lookup by destination hash first
announce = self.database.announces.get_announce_by_hash(hash_hex)
announce = self.database.announces.get_announce_by_hash(stripped)
if not announce and canonical:
announce = self.database.announces.get_announce_by_hash(canonical)
if announce:
announce = dict(announce)
if not announce:
# lookup by identity hash
search_term = canonical if len(canonical) >= 8 else stripped
results = self.database.announces.get_filtered_announces(
search_term=hash_hex,
search_term=search_term,
)
if results:
# find first one with a public key
+41 -24
View File
@@ -2,10 +2,18 @@ import base64
from .database import Database
_ASPECT_LIMIT_KEYS = {
"lxmf.delivery": "announce_limit_lxmf_delivery",
"nomadnetwork.node": "announce_limit_nomadnetwork_node",
"lxmf.propagation": "announce_limit_lxmf_propagation",
_ASPECT_MAX_STORED_KEYS = {
"lxmf.delivery": "announce_max_stored_lxmf_delivery",
"nomadnetwork.node": "announce_max_stored_nomadnetwork_node",
"lxmf.propagation": "announce_max_stored_lxmf_propagation",
"lxst.telephony": "announce_max_stored_lxmf_delivery",
}
_ASPECT_FETCH_LIMIT_KEYS = {
"lxmf.delivery": "announce_fetch_limit_lxmf_delivery",
"nomadnetwork.node": "announce_fetch_limit_nomadnetwork_node",
"lxmf.propagation": "announce_fetch_limit_lxmf_propagation",
"lxst.telephony": "announce_fetch_limit_lxmf_delivery",
}
@@ -14,14 +22,31 @@ class AnnounceManager:
self.db = db
self.config = config
def _get_limit_for_aspect(self, aspect):
key = _ASPECT_LIMIT_KEYS.get(aspect)
if not key:
def _get_max_stored_for_aspect(self, aspect):
    """Configured storage cap for *aspect*, clamped to 1,000,000 rows.

    Returns None (meaning "do not trim") when config is missing, the
    aspect has no mapped key, the attribute is absent, or the configured
    value is unset or below 1.
    """
    key = _ASPECT_MAX_STORED_KEYS.get(aspect)
    if not key or not self.config:
        return None
    setting = getattr(self.config, key, None)
    if setting is None:
        return None
    configured = setting.get()
    if configured is None or configured < 1:
        return None
    return min(configured, 1_000_000)
def _get_fetch_limit_for_aspect(self, aspect):
    """Default query LIMIT for announce fetches of *aspect*.

    Returns 500 whenever a configured positive value is not available;
    otherwise the configured value clamped to 100,000.
    """
    fallback = 500
    if not self.config:
        return fallback
    key = _ASPECT_FETCH_LIMIT_KEYS.get(aspect)
    if not key:
        return fallback
    setting = getattr(self.config, key, None)
    if setting is None:
        return fallback
    configured = setting.get()
    if configured is None or configured < 1:
        return fallback
    return min(configured, 100_000)
def upsert_announce(
self,
@@ -32,26 +57,12 @@ class AnnounceManager:
app_data,
announce_packet_hash,
):
if self.config:
limit = self._get_limit_for_aspect(aspect)
if limit is not None and limit >= 0:
dest_hex = (
destination_hash.hex()
if isinstance(destination_hash, bytes)
else destination_hash
)
existing = self.db.announces.get_announce_by_hash(dest_hex)
if not existing or existing.get("aspect") != aspect:
count = self.db.announces.get_announce_count_by_aspect(aspect)
if count >= limit:
return
rssi = snr = quality = None
if announce_packet_hash and reticulum:
rssi = reticulum.get_packet_rssi(announce_packet_hash)
snr = reticulum.get_packet_snr(announce_packet_hash)
quality = reticulum.get_packet_q(announce_packet_hash)
# prepare data to insert or update
data = {
"destination_hash": destination_hash.hex()
if isinstance(destination_hash, bytes)
@@ -66,12 +77,15 @@ class AnnounceManager:
"quality": quality,
}
# only set app data if provided
if app_data is not None:
data["app_data"] = base64.b64encode(app_data).decode("utf-8")
self.db.announces.upsert_announce(data)
max_stored = self._get_max_stored_for_aspect(aspect)
if max_stored is not None:
self.db.announces.trim_announces_for_aspect(aspect, max_stored)
def get_filtered_announces(
self,
aspect=None,
@@ -79,9 +93,12 @@ class AnnounceManager:
destination_hash=None,
query=None,
blocked_identity_hashes=None,
limit=500,
limit=None,
offset=0,
):
if limit is None:
limit = self._get_fetch_limit_for_aspect(aspect)
sql = """
SELECT a.*, c.custom_image as contact_image
FROM announces a
+9
View File
@@ -336,5 +336,14 @@ class BotHandler:
return False
def stop_all(self):
    """Stop every bot managed by this handler.

    First stops all bots tracked in ``running_bots``; then sweeps the
    persisted ``bots_state`` entries for bots that were not tracked but
    whose recorded PID is still alive, and stops those as well.
    """
    tracked = list(self.running_bots.keys())
    for bot_id in tracked:
        self.stop_bot(bot_id)
    already_stopped = set(tracked)
    for entry in list(self.bots_state):
        entry_id = entry.get("id")
        if not entry_id or entry_id in already_stopped:
            continue
        entry_pid = entry.get("pid")
        if entry_pid and self._is_pid_alive(entry_pid):
            self.stop_bot(entry_id)
+61 -14
View File
@@ -1,22 +1,69 @@
import json
from pathlib import Path
from typing import Any
_BUNDLED = Path(__file__).resolve().parent / "data" / "community_interfaces.json"
class CommunityInterfacesManager:
"""
Suggested community interfaces for the interface wizard.
No outbound connectivity checks are performed; listing these does not contact the internet.
"""
"""Load suggested interface presets from bundled data or public/community_interfaces.json."""
def __init__(self):
self.interfaces = [
{
"name": "Quad4",
"type": "TCPClientInterface",
"target_host": "62.151.179.77",
"target_port": 45657,
"description": "Quad4 Official Node",
},
]
def __init__(self, public_override_path: str | None = None):
    """Load interface presets at construction time.

    Args:
        public_override_path: optional path to a user-provided
            ``community_interfaces.json`` that is tried before the
            bundled data file when present and valid.
    """
    self._public_override_path = public_override_path
    self.interfaces = self._load_raw()
def _candidate_paths(self) -> list[Path]:
    """Candidate preset files in priority order: the optional public
    override first, then the bundled data file."""
    override = self._public_override_path
    candidates: list[Path] = [Path(override)] if override else []
    candidates.append(_BUNDLED)
    return candidates
def _load_raw(self) -> list[dict[str, Any]]:
    """Return normalized preset entries from the first usable candidate.

    A candidate file may hold either a bare JSON list or an object with
    an ``interfaces`` array. Missing, unreadable or invalid candidates —
    and candidates that normalize to an empty list — are skipped.
    Returns [] when no candidate yields entries.
    """
    for candidate in self._candidate_paths():
        if not candidate.is_file():
            continue
        try:
            parsed = json.loads(candidate.read_text(encoding="utf-8"))
        except (OSError, UnicodeError, json.JSONDecodeError):
            continue
        if isinstance(parsed, dict):
            entries = parsed.get("interfaces", parsed)
        else:
            entries = parsed
        if not isinstance(entries, list):
            continue
        cleaned = [
            self._normalize_entry(entry)
            for entry in entries
            if isinstance(entry, dict)
        ]
        if cleaned:
            return cleaned
    return []
@staticmethod
def _normalize_entry(item: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *item* with per-type fields tidied.

    - BackboneInterface: mirror remote/target_host (stripped), coerce
      target_port to int when set.
    - I2PInterface: strip peers; the first peer doubles as target_host;
      ensure a target_port key exists (None when absent).
    - TCPClientInterface: strip target_host, coerce target_port to int.
    Entries of any other type pass through unchanged.
    """
    entry = dict(item)
    kind = entry.get("type")
    if kind == "BackboneInterface":
        host = (entry.get("remote") or entry.get("target_host") or "").strip()
        if host:
            entry["remote"] = host
            entry["target_host"] = host
        port = entry.get("target_port")
        if port is not None and port != "":
            entry["target_port"] = int(port)
    elif kind == "I2PInterface":
        raw_peers = entry.get("i2p_peers") or []
        if isinstance(raw_peers, list) and raw_peers:
            peers = [str(p).strip() for p in raw_peers if str(p).strip()]
            entry["i2p_peers"] = peers
            if peers:
                entry["target_host"] = peers[0]
        entry.setdefault("target_port", None)
    elif kind == "TCPClientInterface":
        host = (entry.get("target_host") or "").strip()
        if host:
            entry["target_host"] = host
        port = entry.get("target_port")
        if port is not None and port != "":
            entry["target_port"] = int(port)
    return entry
async def get_interfaces(self) -> list[dict[str, Any]]:
    """Return preset entries annotated with placeholder status fields.

    Each entry gets ``online=None`` and ``last_check=0``; no
    connectivity check is performed here.
    """
    annotated: list[dict[str, Any]] = []
    for preset in self.interfaces:
        entry = dict(preset)
        entry["online"] = None
        entry["last_check"] = 0
        annotated.append(entry)
    return annotated
@@ -0,0 +1,167 @@
"""Convert directory.rns.recipes listing rows into MeshChat interface preset dicts."""
from __future__ import annotations
import re
from typing import Any
DEFAULT_SUBMITTED_URL = (
"https://directory.rns.recipes/api/directory/submitted?search=&type=&status=online"
)
DESCRIPTION = "directory.rns.recipes (user-submitted, online)"
_RE_REMOTE = re.compile(r"^\s*remote\s*=\s*(\S+)", re.MULTILINE | re.IGNORECASE)
_RE_TARGET_HOST = re.compile(
r"^\s*target_host\s*=\s*(\S+)",
re.MULTILINE | re.IGNORECASE,
)
_RE_TRANSPORT_IDENTITY = re.compile(
r"^\s*transport_identity\s*=\s*(\S+)",
re.MULTILINE | re.IGNORECASE,
)
_RE_PEERS_LINE = re.compile(r"^\s*peers\s*=\s*(.+)$", re.MULTILINE | re.IGNORECASE)
def rows_from_payload(payload: object) -> list[Any]:
    """Extract the listing rows from a directory API payload.

    Accepts either a bare list of rows, or a mapping whose ``data`` key
    holds such a list (a mapping without ``data`` is tried as-is).

    Raises:
        ValueError: when no list of rows can be located.
    """
    if isinstance(payload, list):
        return payload
    if isinstance(payload, dict):
        candidate = payload.get("data", payload)
        if isinstance(candidate, list):
            return candidate
    raise ValueError("Expected list or object with 'data' array")
def _parse_config_value(pattern: re.Pattern[str], cfg: str) -> str:
    """Return the first capture of *pattern* found in *cfg*, stripped of
    whitespace and surrounding single/double quotes; "" when absent."""
    match = pattern.search(cfg)
    if match is None:
        return ""
    return match.group(1).strip().strip('"').strip("'")
def _first_peer_from_config(cfg: str) -> str:
    """Return the first peer listed on a ``peers = ...`` config line.

    Commas and whitespace both act as separators; surrounding quotes are
    stripped. Returns "" when no usable peer is present.
    """
    peers_raw = _parse_config_value(_RE_PEERS_LINE, cfg)
    if not peers_raw:
        return ""
    tokens = (
        tok.strip().strip('"').strip("'")
        for tok in peers_raw.replace(",", " ").split()
    )
    return next((tok for tok in tokens if tok), "")
def _tcp_host_from_row(row: dict[str, Any], cfg: str) -> str:
    """Best-effort TCP host for a directory row.

    Prefers the row's ``host``/``address`` fields; falls back to
    ``remote =`` then ``target_host =`` lines in the raw config text.
    """
    host = (row.get("host") or row.get("address") or "").strip()
    if host:
        return host
    from_remote = _parse_config_value(_RE_REMOTE, cfg)
    return from_remote or _parse_config_value(_RE_TARGET_HOST, cfg)
def _i2p_peer(row: dict[str, Any], cfg: str) -> str:
    """I2P peer address for a row: the ``host``/``address`` field when
    set, otherwise the first entry on the config's peers line."""
    addr = (row.get("host") or row.get("address") or "").strip()
    if addr:
        return addr
    return _first_peer_from_config(cfg)
def _transport_id(row: dict[str, Any], cfg: str) -> str:
    """Transport identity for a row, from the ``transportId`` field or a
    ``transport_identity =`` config line; "" when neither is set."""
    raw = row.get("transportId")
    if raw is not None:
        text = str(raw).strip()
        if text:
            return text
    return _parse_config_value(_RE_TRANSPORT_IDENTITY, cfg)
def transform_directory_rows(rows: list[Any]) -> list[dict[str, Any]]:
    """Convert directory listing rows into MeshChat interface preset dicts.

    Non-dict rows and rows that cannot yield a usable preset (rnode
    entries, missing host, missing or non-numeric port) are skipped.
    Backbone rows lacking a transport identity are emitted as
    TCPClientInterface presets instead.
    """
    out_list: list[dict[str, Any]] = []
    if not isinstance(rows, list):
        return out_list
    for row in rows:
        if not isinstance(row, dict):
            continue
        name = (row.get("name") or "").strip()
        rtype = (row.get("type") or "").lower()
        type_name = row.get("typeName") or ""
        if not isinstance(type_name, str):
            type_name = str(type_name)
        cfg = row.get("config") or ""
        if not isinstance(cfg, str):
            cfg = ""
        # rnode rows are skipped — presumably hardware (radio) entries
        # that cannot be used as network presets; TODO confirm.
        if rtype == "rnode":
            continue
        if rtype == "i2p":
            addr = _i2p_peer(row, cfg)
            if not addr:
                continue
            out_list.append(
                {
                    "name": name or addr,
                    "type": "I2PInterface",
                    "i2p_peers": [addr],
                    "description": DESCRIPTION,
                },
            )
            continue
        # every remaining type requires a numeric TCP port
        port = row.get("port")
        if port is None or port == "":
            continue
        try:
            port_i = int(port)
        except (TypeError, ValueError):
            continue
        addr = _tcp_host_from_row(row, cfg)
        if not addr:
            continue
        tid = _transport_id(row, cfg)
        # A row is treated as backbone when its declared type says so, or
        # when a tcp row's raw config text mentions BackboneInterface.
        backbone_in_type = rtype == "backbone" or type_name == "BackboneInterface"
        backbone_in_tcp = rtype == "tcp" and "BackboneInterface" in cfg
        is_backboneish = backbone_in_type or backbone_in_tcp
        is_tcp_style = rtype == "tcp" or type_name == "TCPClientInterface"
        if is_backboneish and tid:
            out_list.append(
                {
                    "name": name,
                    "type": "BackboneInterface",
                    "remote": addr,
                    "target_port": port_i,
                    "transport_identity": tid,
                    "description": DESCRIPTION,
                },
            )
            continue
        # backbone row without a transport identity: downgrade to a plain
        # TCP client preset rather than dropping it
        if is_backboneish and not tid:
            out_list.append(
                {
                    "name": name,
                    "type": "TCPClientInterface",
                    "target_host": addr,
                    "target_port": port_i,
                    "description": DESCRIPTION,
                },
            )
            continue
        if is_tcp_style:
            out_list.append(
                {
                    "name": name,
                    "type": "TCPClientInterface",
                    "target_host": addr,
                    "target_port": port_i,
                    "description": DESCRIPTION,
                },
            )
    return out_list
+59 -12
View File
@@ -313,21 +313,48 @@ class ConfigManager:
"#ef4444",
)
# announce limits (optional, None = no limit; protects against flood on public networks)
self.announce_limit_lxmf_delivery = self.IntConfig(
# announce caps: max rows stored per aspect (oldest dropped). Default 1000.
self.announce_max_stored_lxmf_delivery = self.IntConfig(
self,
"announce_limit_lxmf_delivery",
None,
"announce_max_stored_lxmf_delivery",
1000,
)
self.announce_limit_nomadnetwork_node = self.IntConfig(
self.announce_max_stored_nomadnetwork_node = self.IntConfig(
self,
"announce_limit_nomadnetwork_node",
None,
"announce_max_stored_nomadnetwork_node",
1000,
)
self.announce_limit_lxmf_propagation = self.IntConfig(
self.announce_max_stored_lxmf_propagation = self.IntConfig(
self,
"announce_limit_lxmf_propagation",
None,
"announce_max_stored_lxmf_propagation",
1000,
)
# default API page size per aspect when limit query param omitted. Default 500.
self.announce_fetch_limit_lxmf_delivery = self.IntConfig(
self,
"announce_fetch_limit_lxmf_delivery",
500,
)
self.announce_fetch_limit_nomadnetwork_node = self.IntConfig(
self,
"announce_fetch_limit_nomadnetwork_node",
500,
)
self.announce_fetch_limit_lxmf_propagation = self.IntConfig(
self,
"announce_fetch_limit_lxmf_propagation",
500,
)
# lxst.telephony shares LXMF caps in announce_manager aspect mapping
self.announce_search_max_fetch = self.IntConfig(
self,
"announce_search_max_fetch",
2000,
)
self.discovered_interfaces_max_return = self.IntConfig(
self,
"discovered_interfaces_max_return",
500,
)
# blackhole integration config
@@ -348,12 +375,29 @@ class ConfigManager:
self.csp_extra_script_src = self.StringConfig(self, "csp_extra_script_src", "")
self.csp_extra_style_src = self.StringConfig(self, "csp_extra_style_src", "")
self._migrate_legacy_announce_limit_keys()
def get(self, key: str, default_value=None) -> str | None:
return self.db.config.get(key, default_value)
def set(self, key: str, value: str | None):
self.db.config.set(key, value)
def _migrate_legacy_announce_limit_keys(self):
    """Copy legacy ``announce_limit_*`` values into the renamed
    ``announce_max_stored_*`` keys when the new key is still unset.

    Legacy keys are not deleted by this migration.
    """
    renames = {
        "announce_limit_lxmf_delivery": "announce_max_stored_lxmf_delivery",
        "announce_limit_nomadnetwork_node": "announce_max_stored_nomadnetwork_node",
        "announce_limit_lxmf_propagation": "announce_max_stored_lxmf_propagation",
    }
    for legacy_key, current_key in renames.items():
        legacy_value = self.db.config.get(legacy_key, default=None)
        current_value = self.db.config.get(current_key, default=None)
        if legacy_value is not None and current_value is None:
            self.db.config.set(current_key, legacy_value)
class StringConfig:
def __init__(self, manager, key: str, default_value: str | None = None):
self.manager = manager
@@ -397,5 +441,8 @@ class ConfigManager:
except (ValueError, TypeError):
return self.default_value
def set(self, value: int):
self.manager.set(self.key, str(value))
def set(self, value: int | None):
if value is None:
self.manager.db.config.delete(self.key)
else:
self.manager.db.config.set(self.key, str(value))
@@ -40,6 +40,29 @@ class AnnounceDAO:
params.append(now)
self.provider.execute(query, params)
def trim_announces_for_aspect(self, aspect, max_rows):
    """Delete oldest rows for this aspect until at most max_rows remain.

    Rows are ordered by ``updated_at`` (then ``id``) ascending, so the
    least recently updated announces are deleted first. A falsy aspect
    or a ``max_rows`` below 1 is a no-op.
    """
    if not aspect or max_rows < 1:
        return
    counted = self.provider.fetchone(
        "SELECT COUNT(*) AS c FROM announces WHERE aspect = ?",
        (aspect,),
    )
    total = counted["c"] if counted else 0
    overshoot = total - max_rows
    if overshoot <= 0:
        return
    self.provider.execute(
        """
        DELETE FROM announces WHERE id IN (
            SELECT id FROM announces WHERE aspect = ?
            ORDER BY updated_at ASC, id ASC
            LIMIT ?
        )
        """,
        (aspect, overshoot),
    )
def get_announces(self, aspect=None):
if aspect:
return self.provider.fetchall(
@@ -1,4 +1,5 @@
import base64
import contextlib
import os
import RNS
@@ -115,3 +116,48 @@ class ForwardingManager:
for alias_hash in self.forwarding_destinations:
destination = self.forwarding_destinations[alias_hash]
destination.announce()
def teardown(self):
    """Stop alias LXMF routers and deregister their RNS destinations.

    Best effort: each phase is wrapped so one failing router does not
    prevent the others from being cleaned up.
    """
    for alias_hash, router in list(self.forwarding_routers.items()):
        try:
            # detach the delivery callback so nothing fires mid-teardown
            if hasattr(router, "register_delivery_callback"):
                with contextlib.suppress(Exception):
                    router.register_delivery_callback(None)
            # deregister every delivery destination from RNS transport
            if hasattr(router, "delivery_destinations"):
                for dest_hash in list(router.delivery_destinations.keys()):
                    dest = router.delivery_destinations[dest_hash]
                    with contextlib.suppress(Exception):
                        RNS.Transport.deregister_destination(dest)
            # the propagation destination (if present) also needs deregistering
            if getattr(router, "propagation_destination", None):
                with contextlib.suppress(Exception):
                    RNS.Transport.deregister_destination(
                        router.propagation_destination,
                    )
        except Exception as e:
            print(f"Error deregistering forwarding destinations {alias_hash}: {e}")
        try:
            # tear down any still-active links terminating at this router's identity
            if hasattr(router, "identity") and router.identity:
                ih = router.identity.hash
                for link in list(RNS.Transport.active_links):
                    match = False
                    if hasattr(link, "destination") and link.destination:
                        if (
                            hasattr(link.destination, "identity")
                            and link.destination.identity
                        ):
                            if link.destination.identity.hash == ih:
                                match = True
                    if match:
                        with contextlib.suppress(Exception):
                            link.teardown()
        except Exception as e:
            print(f"Error cleaning forwarding links {alias_hash}: {e}")
        try:
            # neutralize the router's periodic jobs hook before invoking its
            # exit handler so it cannot reschedule work while stopping
            router.jobs = lambda: None
            if hasattr(router, "exit_handler"):
                router.exit_handler()
        except Exception as e:
            print(f"Error stopping forwarding LXMF router {alias_hash}: {e}")
    self.forwarding_destinations.clear()
    self.forwarding_routers.clear()
+12 -4
View File
@@ -315,7 +315,9 @@ class IdentityContext:
storage_dir=self.storage_path,
)
self.community_interfaces_manager = CommunityInterfacesManager()
self.community_interfaces_manager = CommunityInterfacesManager(
public_override_path=self.app.get_public_path("community_interfaces.json"),
)
self.auto_propagation_manager = AutoPropagationManager(
app=self.app,
@@ -466,6 +468,15 @@ class IdentityContext:
RNS.Transport.deregister_announce_handler(handler)
self.announce_handlers = []
if self.forwarding_manager:
try:
self.forwarding_manager.teardown()
except Exception as e:
print(
f"Error tearing down forwarding manager for {self.identity_hash}: {e}",
)
self.forwarding_manager = None
# 2. Cleanup RNS destinations and links
try:
if self.message_router:
@@ -564,9 +575,6 @@ class IdentityContext:
print(f"Error while stopping bots for {self.identity_hash}: {e}")
self.bot_handler = None
if self.forwarding_manager:
self.forwarding_manager = None
if self.database:
try:
if not getattr(self.app, "emergency", False):
+18
View File
@@ -214,3 +214,21 @@ def parse_lxmf_propagation_node_app_data(app_data_base64: str | bytes | None):
except Exception as e:
print(f"Failed to parse LXMF propagation node app data: {e}")
return None
def normalize_hex_identifier(value: str | None) -> str:
"""Return lowercase hex digits only (strips UUID hyphens, colons, whitespace)."""
if not value or not isinstance(value, str):
return ""
return "".join(c for c in value.strip().lower() if c in "0123456789abcdef")
def hex_identifier_to_bytes(value: str | None) -> bytes | None:
"""Parse a hex identity or hash string for ``bytes.fromhex`` (tolerates UUID-style separators)."""
h = normalize_hex_identifier(value)
if not h or len(h) % 2:
return None
try:
return bytes.fromhex(h)
except ValueError:
return None
+21 -6
View File
@@ -7,6 +7,11 @@ import time
import RNS
from LXST import Telephone
from meshchatx.src.backend.meshchat_utils import (
hex_identifier_to_bytes,
normalize_hex_identifier,
)
class Tee:
def __init__(self, sink):
@@ -223,7 +228,9 @@ class TelephoneManager:
def resolve_identity(target_hash_hex):
"""Resolve identity from multiple hints: direct recall, destination_hash announce, identity_hash announce, or public key."""
target_hash = bytes.fromhex(target_hash_hex)
target_hash = hex_identifier_to_bytes(target_hash_hex)
if not target_hash:
return None
# 1) Direct recall (identity hash)
ident = RNS.Identity.recall(target_hash)
@@ -233,12 +240,18 @@ class TelephoneManager:
if not self.db:
return None
th = target_hash_hex.strip()
canonical = normalize_hex_identifier(th)
# 2) By destination_hash (could be lxst.telephony or lxmf.delivery hash)
announce = self.db.announces.get_announce_by_hash(target_hash_hex)
announce = self.db.announces.get_announce_by_hash(th)
if not announce and canonical:
announce = self.db.announces.get_announce_by_hash(canonical)
if not announce:
# 3) By identity_hash field (if user entered identity hash but we missed recall, or other announce types)
id_key = canonical if canonical else th
announces = self.db.announces.get_filtered_announces(
identity_hash=target_hash_hex,
identity_hash=id_key,
)
if announces:
announce = announces[0]
@@ -249,9 +262,11 @@ class TelephoneManager:
# Try identity_hash from announce
identity_hex = announce.get("identity_hash")
if identity_hex:
ident = RNS.Identity.recall(bytes.fromhex(identity_hex))
if ident:
return ident
id_bytes = hex_identifier_to_bytes(identity_hex)
if id_bytes:
ident = RNS.Identity.recall(id_bytes)
if ident:
return ident
# Try reconstructing from public key
if announce.get("identity_public_key"):
+1 -1
View File
@@ -2,4 +2,4 @@
Run: pnpm run version:sync
"""
__version__ = "4.3.1"
__version__ = "4.4.0"
+6
View File
@@ -116,6 +116,12 @@ def test_db_performance():
end_time = time.time()
print(f"get_filtered_announces() took {end_time - start_time:.4f} seconds")
print("Testing trim_announces_for_aspect()...")
start_time = time.time()
db.announces.trim_announces_for_aspect("lxmf.delivery", 1000)
end_time = time.time()
print(f"trim_announces_for_aspect() took {end_time - start_time:.4f} seconds")
shutil.rmtree(dir_path)
+34
View File
@@ -153,6 +153,39 @@ class PerformanceBenchmarker:
count,
)
def benchmark_announce_trim(self, seed_count=800, runs=40):
    """Benchmark AnnounceDAO.trim_announces_for_aspect() throughput."""
    aspect = "lxmf.delivery"
    def seed():
        # populate seed_count announce rows for the aspect; the provider
        # is used as a context manager — presumably one transaction, TODO confirm
        with self.db.provider:
            for _ in range(seed_count):
                self.db.announces.upsert_announce(
                    {
                        "destination_hash": generate_hash(),
                        "aspect": aspect,
                        "identity_hash": generate_hash(),
                        "identity_public_key": "cHVibmtleQ==",
                        "app_data": None,
                        "rssi": None,
                        "snr": None,
                        "quality": None,
                    },
                )
    seed()
    def run_trim():
        # repeatedly trim down to half the seeded size; after the first
        # call the table is already at the cap, so later iterations also
        # exercise the cheap no-op path
        for _ in range(runs):
            self.db.announces.trim_announces_for_aspect(
                aspect, max(1, seed_count // 2)
            )
    self.record_benchmark(
        f"Announce trim ({seed_count} rows, {runs} trims)",
        run_trim,
        runs,
    )
def main():
print("Starting Backend Memory & Performance Benchmarking...")
@@ -163,6 +196,7 @@ def main():
bench.benchmark_crash_recovery_overhead()
bench.benchmark_identity_generation()
bench.benchmark_identity_listing()
bench.benchmark_announce_trim()
print("\n" + "=" * 80)
print(f"{'Benchmark Name':40} | {'Avg Time':10} | {'Mem Growth':10}")
@@ -255,10 +255,16 @@ class BackendBenchmarker:
def get_announces():
return self.db.announces.get_filtered_announces(limit=50)
@benchmark("Trim Announces for Aspect", iterations=20)
def trim_announces():
return self.db.announces.trim_announces_for_aspect("lxmf.delivery", 500)
_, res = upsert_announces()
self.results.append(res)
_, res = get_announces()
self.results.append(res)
_, res = trim_announces()
self.results.append(res)
def bench_identity_operations(self):
manager = IdentityManager(self.temp_dir)
+59
View File
@@ -0,0 +1,59 @@
import os
import tempfile
from meshchatx.src.backend.database import Database
from meshchatx.src.backend.database.provider import DatabaseProvider
def _insert(db, dest_hex, aspect, updated_order):
    """Insert one announce row, then force its updated_at to a known value.

    Pinning updated_at makes trim ordering deterministic in the tests below.
    """
    record = {
        "destination_hash": dest_hex,
        "aspect": aspect,
        "identity_hash": "a" * 32,
        "identity_public_key": "cHVibmtleQ==",
        "app_data": None,
        "rssi": None,
        "snr": None,
        "quality": None,
    }
    db.announces.upsert_announce(record)
    db.provider.execute(
        "UPDATE announces SET updated_at = ? WHERE destination_hash = ?",
        (updated_order, dest_hex),
    )
def test_trim_announces_for_aspect_drops_oldest():
    """trim_announces_for_aspect keeps the newest rows and drops the oldest."""
    path = None
    db = None
    try:
        with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
            path = handle.name
        db = Database(path)
        db.initialize()
        aspect = "lxmf.delivery"
        # Three rows with strictly increasing updated_at timestamps.
        _insert(db, "01" * 16, aspect, "2000-01-01T00:00:00Z")
        _insert(db, "02" * 16, aspect, "2000-01-02T00:00:00Z")
        _insert(db, "03" * 16, aspect, "2000-01-03T00:00:00Z")

        db.announces.trim_announces_for_aspect(aspect, 2)

        remaining = {
            row["destination_hash"]
            for row in db.announces.get_announces(aspect=aspect)
        }
        assert remaining == {"02" * 16, "03" * 16}
    finally:
        if db is not None:
            try:
                db.close()
            except Exception:
                pass
            # Reset the singleton so later tests get a fresh provider.
            DatabaseProvider._instance = None
        if path:
            # Remove the database plus SQLite WAL/SHM sidecar files.
            for victim in (path, path + "-wal", path + "-shm"):
                try:
                    os.unlink(victim)
                except OSError:
                    pass
+8 -16
View File
@@ -35,24 +35,15 @@ def test_flood_announces_no_limit(mock_db):
)
assert mock_db.announces.upsert_announce.call_count == 500
mock_db.announces.trim_announces_for_aspect.assert_not_called()
def test_flood_announces_with_limit_caps_at_limit(mock_db):
def test_flood_announces_trims_each_upsert(mock_db):
config = MagicMock()
config.announce_limit_lxmf_delivery = MagicMock()
config.announce_limit_lxmf_delivery.get.return_value = 50
mock_db.announces.get_announce_by_hash.return_value = None
count = [0]
def get_count(aspect):
return count[0]
def on_upsert(*args, **kwargs):
count[0] += 1
mock_db.announces.get_announce_count_by_aspect.side_effect = get_count
mock_db.announces.upsert_announce.side_effect = on_upsert
config.announce_max_stored_lxmf_delivery = MagicMock()
config.announce_max_stored_lxmf_delivery.get.return_value = 50
config.announce_max_stored_nomadnetwork_node = MagicMock()
config.announce_max_stored_lxmf_propagation = MagicMock()
manager = AnnounceManager(mock_db, config)
reticulum = MagicMock()
@@ -72,7 +63,8 @@ def test_flood_announces_with_limit_caps_at_limit(mock_db):
os.urandom(16),
)
assert count[0] == 50
assert mock_db.announces.upsert_announce.call_count == 100
assert mock_db.announces.trim_announces_for_aspect.call_count == 100
def test_load_rapid_nomadnet_announces(mock_db):
+16 -22
View File
@@ -34,32 +34,20 @@ def test_upsert_announce_fuzz_aspect_and_app_data(aspect, app_data):
@pytest.mark.parametrize(
"limit_val,count,should_accept",
[
(None, 100, True),
(0, 0, False),
(10, 5, True),
(10, 10, False),
(100, 100, False),
(1000, 0, True),
],
"max_stored",
[None, 0, 1, 10, 1000, 999_999],
)
def test_announce_limit_config_fuzz(limit_val, count, should_accept):
def test_announce_max_stored_config_fuzz(max_stored):
mock_db = MagicMock()
mock_db.announces = MagicMock()
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = count
config = MagicMock()
config.announce_limit_lxmf_delivery = MagicMock()
config.announce_limit_lxmf_delivery.get.return_value = limit_val
config.announce_limit_nomadnetwork_node = MagicMock()
config.announce_limit_lxmf_propagation = MagicMock()
config.announce_max_stored_lxmf_delivery = MagicMock()
config.announce_max_stored_lxmf_delivery.get.return_value = max_stored
config.announce_max_stored_nomadnetwork_node = MagicMock()
config.announce_max_stored_lxmf_propagation = MagicMock()
manager = AnnounceManager(mock_db, config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
@@ -68,7 +56,13 @@ def test_announce_limit_config_fuzz(limit_val, count, should_accept):
reticulum, identity, b"new_dest", "lxmf.delivery", b"app_data", b"packet"
)
if should_accept:
mock_db.announces.upsert_announce.assert_called_once()
mock_db.announces.upsert_announce.assert_called_once()
if max_stored is not None and max_stored >= 1:
mock_db.announces.trim_announces_for_aspect.assert_called_once()
cap = min(max_stored, 1_000_000)
mock_db.announces.trim_announces_for_aspect.assert_called_with(
"lxmf.delivery",
cap,
)
else:
mock_db.announces.upsert_announce.assert_not_called()
mock_db.announces.trim_announces_for_aspect.assert_not_called()
+56 -72
View File
@@ -16,41 +16,24 @@ def mock_db():
@pytest.fixture
def mock_config():
config = MagicMock()
config.announce_limit_lxmf_delivery = MagicMock()
config.announce_limit_nomadnetwork_node = MagicMock()
config.announce_limit_lxmf_propagation = MagicMock()
config.announce_max_stored_lxmf_delivery = MagicMock()
config.announce_max_stored_nomadnetwork_node = MagicMock()
config.announce_max_stored_lxmf_propagation = MagicMock()
config.announce_fetch_limit_lxmf_delivery = MagicMock()
config.announce_fetch_limit_nomadnetwork_node = MagicMock()
config.announce_fetch_limit_lxmf_propagation = MagicMock()
return config
def test_announce_limit_rejects_when_at_capacity(mock_db, mock_config):
mock_config.announce_limit_lxmf_delivery.get.return_value = 2
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = 2
def _make_manager(mock_db, mock_config):
return AnnounceManager(mock_db, mock_config)
manager = AnnounceManager(mock_db, mock_config)
def test_trim_called_when_over_max_stored(mock_db, mock_config):
mock_config.announce_max_stored_lxmf_delivery.get.return_value = 3
manager = _make_manager(mock_db, mock_config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
manager.upsert_announce(
reticulum, identity, b"new_dest", "lxmf.delivery", b"app_data", b"packet_hash"
)
mock_db.announces.upsert_announce.assert_not_called()
def test_announce_limit_allows_when_under_capacity(mock_db, mock_config):
mock_config.announce_limit_lxmf_delivery.get.return_value = 10
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = 5
manager = AnnounceManager(mock_db, mock_config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
@@ -60,60 +43,48 @@ def test_announce_limit_allows_when_under_capacity(mock_db, mock_config):
)
mock_db.announces.upsert_announce.assert_called_once()
mock_db.announces.trim_announces_for_aspect.assert_called_once_with(
"lxmf.delivery",
3,
)
def test_announce_limit_allows_update_of_existing(mock_db, mock_config):
mock_config.announce_limit_lxmf_delivery.get.return_value = 1
mock_db.announces.get_announce_by_hash.return_value = {
"destination_hash": b"existing".hex(),
"aspect": "lxmf.delivery",
}
mock_db.announces.get_announce_count_by_aspect.return_value = 1
manager = AnnounceManager(mock_db, mock_config)
def test_no_trim_without_config(mock_db):
manager = AnnounceManager(mock_db)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
manager.upsert_announce(
reticulum, identity, b"existing", "lxmf.delivery", b"app_data", b"packet_hash"
reticulum, identity, b"dest", "lxmf.delivery", b"app_data", b"packet"
)
mock_db.announces.upsert_announce.assert_called_once()
mock_db.announces.trim_announces_for_aspect.assert_not_called()
def test_announce_limit_none_means_no_limit(mock_db, mock_config):
mock_config.announce_limit_lxmf_delivery.get.return_value = None
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = 1000
def test_max_stored_none_skips_trim(mock_db, mock_config):
mock_config.announce_max_stored_lxmf_delivery.get.return_value = None
manager = AnnounceManager(mock_db, mock_config)
manager = _make_manager(mock_db, mock_config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
manager.upsert_announce(
reticulum, identity, b"new_dest", "lxmf.delivery", b"app_data", b"packet_hash"
reticulum, identity, b"dest", "lxmf.delivery", b"app_data", b"packet"
)
mock_db.announces.upsert_announce.assert_called_once()
mock_db.announces.trim_announces_for_aspect.assert_not_called()
def test_announce_limit_nomadnetwork_aspect(mock_db, mock_config):
mock_config.announce_limit_nomadnetwork_node.get.return_value = 1
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = 1
def test_nomadnetwork_uses_own_max_key(mock_db, mock_config):
mock_config.announce_max_stored_nomadnetwork_node.get.return_value = 5
manager = AnnounceManager(mock_db, mock_config)
manager = _make_manager(mock_db, mock_config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
@@ -121,24 +92,23 @@ def test_announce_limit_nomadnetwork_aspect(mock_db, mock_config):
manager.upsert_announce(
reticulum,
identity,
b"new_dest",
b"dest",
"nomadnetwork.node",
b"app_data",
b"packet_hash",
b"packet",
)
mock_db.announces.upsert_announce.assert_not_called()
mock_db.announces.trim_announces_for_aspect.assert_called_once_with(
"nomadnetwork.node",
5,
)
def test_announce_limit_propagation_aspect(mock_db, mock_config):
mock_config.announce_limit_lxmf_propagation.get.return_value = 0
mock_db.announces.get_announce_by_hash.return_value = None
mock_db.announces.get_announce_count_by_aspect.return_value = 0
def test_telephony_shares_lxmf_max(mock_db, mock_config):
mock_config.announce_max_stored_lxmf_delivery.get.return_value = 7
manager = AnnounceManager(mock_db, mock_config)
manager = _make_manager(mock_db, mock_config)
reticulum = MagicMock()
reticulum.get_packet_rssi.return_value = -50
reticulum.get_packet_snr.return_value = 10
identity = MagicMock()
identity.hash.hex.return_value = "id_hash"
identity.get_public_key.return_value = b"pub_key"
@@ -146,13 +116,27 @@ def test_announce_limit_propagation_aspect(mock_db, mock_config):
manager.upsert_announce(
reticulum,
identity,
b"new_dest",
"lxmf.propagation",
b"dest",
"lxst.telephony",
b"app_data",
b"packet_hash",
b"packet",
)
mock_db.announces.upsert_announce.assert_not_called()
mock_db.announces.trim_announces_for_aspect.assert_called_once_with(
"lxst.telephony",
7,
)
def test_get_filtered_announces_resolves_default_limit(mock_db, mock_config):
mock_config.announce_fetch_limit_lxmf_delivery.get.return_value = 33
manager = _make_manager(mock_db, mock_config)
manager.get_filtered_announces(aspect="lxmf.delivery", limit=None)
args, _ = mock_db.provider.fetchall.call_args
_sql, params = args
assert 33 in params
def test_announce_handles_none_packet_hash(mock_db):
+3 -3
View File
@@ -94,8 +94,8 @@ async def test_app_status_endpoints(mock_rns_minimal, temp_dir):
app_instance.config.set("tutorial_seen", True)
assert app_instance.config.get("tutorial_seen") == "true"
app_instance.config.set("changelog_seen_version", "4.3.1")
assert app_instance.config.get("changelog_seen_version") == "4.3.1"
app_instance.config.set("changelog_seen_version", "4.4.0")
assert app_instance.config.get("changelog_seen_version") == "4.4.0"
# Test app_info returns these values
with ExitStack() as info_stack:
@@ -111,4 +111,4 @@ async def test_app_status_endpoints(mock_rns_minimal, temp_dir):
assert val == "true"
val = app_instance.config.get("changelog_seen_version")
assert val == "4.3.1"
assert val == "4.4.0"
@@ -0,0 +1,35 @@
"""Tests for ForwardingManager lifecycle."""
from unittest.mock import MagicMock, patch
import RNS
from meshchatx.src.backend.forwarding_manager import ForwardingManager
def test_forwarding_manager_teardown_deregisters_and_stops_routers():
    """teardown() must deregister all destinations and stop every router."""
    db = MagicMock()
    db.messages.get_all_forwarding_mappings.return_value = []
    manager = ForwardingManager(
        db,
        storage_path="/tmp/fwd_test",
        delivery_callback=lambda _msg: None,
        config=None,
    )

    destination = MagicMock()
    router = MagicMock()
    router.delivery_destinations = {"a": destination}
    router.propagation_destination = MagicMock()

    alias = "deadbeef"
    manager.forwarding_destinations[alias] = destination
    manager.forwarding_routers[alias] = router

    with patch.object(RNS.Transport, "deregister_destination") as deregister:
        manager.teardown()

    # At least the forwarding destination plus router-owned destinations.
    assert deregister.call_count >= 2
    router.exit_handler.assert_called_once()
    assert manager.forwarding_destinations == {}
    assert manager.forwarding_routers == {}
+8
View File
@@ -144,6 +144,14 @@ def test_get_config_dict_basic(mock_app):
"desktop_open_calls_in_separate_window",
"desktop_hardware_acceleration_enabled",
"blackhole_integration_enabled",
"announce_max_stored_lxmf_delivery",
"announce_max_stored_nomadnetwork_node",
"announce_max_stored_lxmf_propagation",
"announce_fetch_limit_lxmf_delivery",
"announce_fetch_limit_nomadnetwork_node",
"announce_fetch_limit_lxmf_propagation",
"announce_search_max_fetch",
"discovered_interfaces_max_return",
"csp_extra_connect_src",
"csp_extra_img_src",
"csp_extra_frame_src",
+4
View File
@@ -105,6 +105,10 @@ def test_config_manager_smoke():
config.auto_announce_enabled.set(True)
assert config.auto_announce_enabled.get() is True
assert config.announce_max_stored_lxmf_delivery.get() == 1000
assert config.announce_fetch_limit_lxmf_delivery.get() == 500
assert config.discovered_interfaces_max_return.get() == 500
def test_telephone_manager_smoke():
"""Smoke test for TelephoneManager initialization."""