Mirror of https://github.com/Kpa-clawbot/meshcore-analyzer.git (synced 2026-04-27 08:55:14 +00:00)

Compare commits (85 commits)
| SHA1 |
|---|
| b0c9ff9b2b |
| 12b8c176f1 |
| 3e39776178 |
| 8851d996f2 |
| dc635775b5 |
| 8a94c43334 |
| 6aaa5cdc20 |
| 788005bff7 |
| af03f9aa57 |
| 3328ca4354 |
| 14732135b7 |
| e42477b810 |
| cbc3e3ce13 |
| 1796493ec0 |
| 168866ecb6 |
| be9257cd26 |
| b5b6faf90a |
| 592061ec7e |
| 596ccf2322 |
| 232770a858 |
| 747aea37b7 |
| 968c104e14 |
| 6f35d4d417 |
| aaf00d0616 |
| 41c046c974 |
| 1fbdd1c3d3 |
| d34320fa6c |
| 77b7c33d0f |
| 0a55717283 |
| bcab31bf72 |
| 6ae62ce535 |
| 6e2f79c0ad |
| b0862f7a41 |
| 45991eca09 |
| 76c42556a2 |
| 6f8378a31c |
| 56115ee0a4 |
| 321d1cf913 |
| 790a713ba9 |
| cd470dffbe |
| 7ff89d8607 |
| 493849f2e3 |
| 87ac61748c |
| 26de38f4b6 |
| d2d4c504e8 |
| b37e8e2da2 |
| 45d8116880 |
| f68e98c376 |
| f3d5d1e021 |
| 02004c5912 |
| ef30031e2e |
| 67511ed6a7 |
| b35b473508 |
| d4f2c3ac66 |
| 37300bf5c8 |
| cb8a2e15c8 |
| aac038abb9 |
| 588fba226d |
| c670742589 |
| f897ce1b26 |
| cbfce41d7e |
| 1e1c4cb91f |
| 0c340e1eb6 |
| ae38cdefb4 |
| a97fa52f10 |
| 43673e86f2 |
| 81ef51cc5c |
| ddce26ff2d |
| ee29cc627f |
| f3caf42be4 |
| c34744247a |
| 10f712f9d7 |
| 412a8fdb8f |
| 9a39198d92 |
| 526ea8a1fc |
| 8e42febc9c |
| 59bff5462c |
| 8c1cd8a9fe |
| 29e8e37114 |
| 9b9f396af5 |
| b472c8de30 |
| 03e384bbc4 |
| bf8c9e72ec |
| 48923db3d0 |
| 709e5a4776 |
```diff
@@ -236,7 +236,7 @@ jobs:
   build:
     name: "🏗️ Build Docker Image"
     needs: [e2e-test]
-    runs-on: [self-hosted, Linux]
+    runs-on: [self-hosted, meshcore-vm]
    steps:
      - name: Checkout code
        uses: actions/checkout@v5
@@ -271,7 +271,7 @@ jobs:
     name: "🚀 Deploy Staging"
     if: github.event_name == 'push'
     needs: [build]
-    runs-on: [self-hosted, Linux]
+    runs-on: [self-hosted, meshcore-vm]
    steps:
      - name: Checkout code
        uses: actions/checkout@v5
```
```diff
@@ -362,6 +362,12 @@ One logical change per commit. Each commit is deployable. Each commit has its te
 - Tests: `test-{feature}.js` in repo root
 - No build step, no transpilation — write ES2020 for server, ES5/6 for frontend (broad browser support)
+
+### Deep Linking
+All new UI states that a user might want to share or bookmark MUST be reflected in the URL hash.
+This includes: tabs, filters, selected items, view modes. Use query parameters on the hash
+(e.g., `#/packets?observer=ABC&timeRange=24h`) for filter state.
+Existing patterns: `#/nodes/{pubkey}?section=node-neighbors`, `#/analytics?tab=collisions`, `#/packets/{hash}`.
 
 ## What NOT to Do
 - **Don't check in private information** — no names, API keys, tokens, passwords, IP addresses, personal data, or any identifying information. This is a PUBLIC repo.
 - Don't add npm dependencies without asking
```
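As a quick illustration of the hash scheme described above, a deep link splits cleanly into a route plus query-encoded filter state. The sketch below is hypothetical and standalone (the app's frontend does this in JavaScript; this repo contains no such Go helper), shown in Go only to keep one language across the examples in this page:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// A deep link as it appears after the "#" in the page URL.
	hash := "/packets?observer=ABC&timeRange=24h"

	// The fragment is shaped like a path plus query string,
	// so net/url can split it into route and filter state.
	u, err := url.Parse(hash)
	if err != nil {
		panic(err)
	}
	route := strings.TrimPrefix(u.Path, "/")
	q := u.Query()

	fmt.Println(route)              // packets
	fmt.Println(q.Get("observer"))  // ABC
	fmt.Println(q.Get("timeRange")) // 24h
}
```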
New file (+674 lines):

@@ -0,0 +1,674 @@
The added file is the standard, unmodified text of the GNU General Public License, Version 3, 29 June 2007 (Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>), including the "How to Apply These Terms to Your New Programs" appendix. Full text: <https://www.gnu.org/licenses/gpl-3.0.txt>.
+24 -1

```diff
@@ -36,6 +36,7 @@ type Config struct {
 	ChannelKeys  map[string]string `json:"channelKeys,omitempty"`
 	HashChannels []string          `json:"hashChannels,omitempty"`
 	Retention    *RetentionConfig  `json:"retention,omitempty"`
+	Metrics      *MetricsConfig    `json:"metrics,omitempty"`
 	GeoFilter    *GeoFilterConfig  `json:"geo_filter,omitempty"`
 }
 
@@ -44,7 +45,29 @@ type GeoFilterConfig = geofilter.Config
 
 // RetentionConfig controls how long stale nodes are kept before being moved to inactive_nodes.
 type RetentionConfig struct {
-	NodeDays int `json:"nodeDays"`
+	NodeDays    int `json:"nodeDays"`
+	MetricsDays int `json:"metricsDays"`
 }
 
+// MetricsConfig controls observer metrics collection.
+type MetricsConfig struct {
+	SampleIntervalSec int `json:"sampleIntervalSec"`
+}
+
+// MetricsSampleInterval returns the configured sample interval or 300s default.
+func (c *Config) MetricsSampleInterval() int {
+	if c.Metrics != nil && c.Metrics.SampleIntervalSec > 0 {
+		return c.Metrics.SampleIntervalSec
+	}
+	return 300
+}
+
+// MetricsRetentionDays returns configured metrics retention or 30 days default.
+func (c *Config) MetricsRetentionDays() int {
+	if c.Retention != nil && c.Retention.MetricsDays > 0 {
+		return c.Retention.MetricsDays
+	}
+	return 30
+}
+
 // NodeDaysOrDefault returns the configured retention.nodeDays or 7 if not set.
```
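To make the fallback behavior of the two accessors above concrete, here is a minimal, self-contained sketch (hypothetical snippet, not from the repo) that unmarshals a JSON config in which only the sample interval is set, so only that value overrides its default:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the config types from the diff above.
type RetentionConfig struct {
	NodeDays    int `json:"nodeDays"`
	MetricsDays int `json:"metricsDays"`
}

type MetricsConfig struct {
	SampleIntervalSec int `json:"sampleIntervalSec"`
}

type Config struct {
	Retention *RetentionConfig `json:"retention,omitempty"`
	Metrics   *MetricsConfig   `json:"metrics,omitempty"`
}

func (c *Config) MetricsSampleInterval() int {
	if c.Metrics != nil && c.Metrics.SampleIntervalSec > 0 {
		return c.Metrics.SampleIntervalSec
	}
	return 300 // default: one sample every 5 minutes
}

func (c *Config) MetricsRetentionDays() int {
	if c.Retention != nil && c.Retention.MetricsDays > 0 {
		return c.Retention.MetricsDays
	}
	return 30 // default: keep 30 days of samples
}

func main() {
	// "metrics.sampleIntervalSec" is set; "retention.metricsDays" is not.
	raw := `{"metrics":{"sampleIntervalSec":60},"retention":{"nodeDays":7}}`
	var cfg Config
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.MetricsSampleInterval()) // 60
	fmt.Println(cfg.MetricsRetentionDays())  // 30 (fallback)
}
```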
+140 -1

```diff
@@ -39,11 +39,19 @@ type Store struct {
 	stmtGetObserverRowid       *sql.Stmt
 	stmtUpdateObserverLastSeen *sql.Stmt
 	stmtUpdateNodeTelemetry    *sql.Stmt
+	stmtUpsertMetrics          *sql.Stmt
+
+	sampleIntervalSec int
 }
 
 // OpenStore opens or creates a SQLite DB at the given path, applying the
 // v3 schema that is compatible with the Node.js server.
 func OpenStore(dbPath string) (*Store, error) {
+	return OpenStoreWithInterval(dbPath, 300)
+}
+
+// OpenStoreWithInterval opens or creates a SQLite DB with a configurable sample interval.
+func OpenStoreWithInterval(dbPath string, sampleIntervalSec int) (*Store, error) {
 	dir := filepath.Dir(dbPath)
 	if err := os.MkdirAll(dir, 0o755); err != nil {
 		return nil, fmt.Errorf("creating data dir: %w", err)
```
```diff
@@ -66,7 +74,7 @@ func OpenStore(dbPath string) (*Store, error) {
 		return nil, fmt.Errorf("applying schema: %w", err)
 	}
 
-	s := &Store{db: db}
+	s := &Store{db: db, sampleIntervalSec: sampleIntervalSec}
 	if err := s.prepareStatements(); err != nil {
 		return nil, fmt.Errorf("preparing statements: %w", err)
 	}
```
```diff
@@ -292,6 +300,51 @@ func applySchema(db *sql.DB) error {
 		log.Println("[migration] observations timestamp index created")
 	}
 
+	// observer_metrics table for RF health dashboard
+	row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observer_metrics_v1'")
+	if row.Scan(&migDone) != nil {
+		log.Println("[migration] Creating observer_metrics table...")
+		_, err := db.Exec(`
+			CREATE TABLE IF NOT EXISTS observer_metrics (
+				observer_id TEXT NOT NULL,
+				timestamp   TEXT NOT NULL,
+				noise_floor REAL,
+				tx_air_secs INTEGER,
+				rx_air_secs INTEGER,
+				recv_errors INTEGER,
+				battery_mv  INTEGER,
+				PRIMARY KEY (observer_id, timestamp)
+			)
+		`)
+		if err != nil {
+			return fmt.Errorf("observer_metrics schema: %w", err)
+		}
+		db.Exec(`INSERT INTO _migrations (name) VALUES ('observer_metrics_v1')`)
+		log.Println("[migration] observer_metrics table created")
+	}
+
+	// Migration: add timestamp index for cross-observer time-range queries
+	row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observer_metrics_ts_idx'")
+	if row.Scan(&migDone) != nil {
+		log.Println("[migration] Creating observer_metrics timestamp index...")
+		_, err := db.Exec(`CREATE INDEX IF NOT EXISTS idx_observer_metrics_timestamp ON observer_metrics(timestamp)`)
+		if err != nil {
+			return fmt.Errorf("observer_metrics timestamp index: %w", err)
+		}
+		db.Exec(`INSERT INTO _migrations (name) VALUES ('observer_metrics_ts_idx')`)
+		log.Println("[migration] observer_metrics timestamp index created")
+	}
+
+	// Migration: add packets_sent and packets_recv columns to observer_metrics
+	row = db.QueryRow("SELECT 1 FROM _migrations WHERE name = 'observer_metrics_packets_v1'")
+	if row.Scan(&migDone) != nil {
+		log.Println("[migration] Adding packets_sent/packets_recv columns to observer_metrics...")
+		db.Exec(`ALTER TABLE observer_metrics ADD COLUMN packets_sent INTEGER`)
+		db.Exec(`ALTER TABLE observer_metrics ADD COLUMN packets_recv INTEGER`)
+		db.Exec(`INSERT INTO _migrations (name) VALUES ('observer_metrics_packets_v1')`)
+		log.Println("[migration] packets_sent/packets_recv columns added")
+	}
+
 	return nil
 }
```
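All three migrations above follow the same guard shape: probe the `_migrations` table for a marker row, run the DDL only when the probe fails, then record the marker. A minimal sketch of that pattern factored into a helper (hypothetical; the repo inlines each check rather than using a helper like this):

```go
package store

import (
	"database/sql"
	"fmt"
)

// runOnce executes ddl exactly once per database, using the _migrations
// table as a ledger of already-applied migration names.
func runOnce(db *sql.DB, name, ddl string) error {
	var done int
	row := db.QueryRow("SELECT 1 FROM _migrations WHERE name = ?", name)
	if row.Scan(&done) == nil {
		return nil // marker present: migration already applied
	}
	if _, err := db.Exec(ddl); err != nil {
		return fmt.Errorf("%s: %w", name, err)
	}
	_, err := db.Exec("INSERT INTO _migrations (name) VALUES (?)", name)
	return err
}
```

With such a helper, each migration becomes a single call, e.g. `runOnce(db, "observer_metrics_ts_idx", "CREATE INDEX ...")`.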
```diff
@@ -385,6 +438,14 @@ func (s *Store) prepareStatements() error {
 		return err
 	}
 
+	s.stmtUpsertMetrics, err = s.db.Prepare(`
+		INSERT OR REPLACE INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv)
+		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+	`)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
```
```diff
@@ -517,6 +578,11 @@ type ObserverMeta struct {
 	BatteryMv   *int     // millivolts, always integer
 	UptimeSecs  *int64   // seconds, always integer
 	NoiseFloor  *float64 // dBm, may have decimals
+	TxAirSecs   *int     // cumulative TX seconds since boot
+	RxAirSecs   *int     // cumulative RX seconds since boot
+	RecvErrors  *int     // cumulative CRC/decode failures since boot
+	PacketsSent *int     // cumulative packets sent since boot
+	PacketsRecv *int     // cumulative packets received since boot
 }
 
 // UpsertObserver inserts or updates an observer with optional hardware metadata.
```
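Note that the new fields are cumulative counters since boot, so a consumer that wants per-interval rates has to difference successive samples and treat a counter that goes backwards as a reboot. A minimal sketch of that consumer-side logic (an assumption about how a dashboard would use the data, not code from this repo):

```go
package main

import "fmt"

// delta returns the increase between two cumulative counter samples,
// treating a decrease as a counter reset (observer reboot).
func delta(prev, cur int) int {
	if cur < prev {
		return cur // counter restarted from zero at boot
	}
	return cur - prev
}

func main() {
	// Cumulative recv_errors samples; the observer rebooted before the last one.
	recvErrors := []int{3, 7, 12, 2}
	for i := 1; i < len(recvErrors); i++ {
		fmt.Println(delta(recvErrors[i-1], recvErrors[i])) // prints 4, 5, 2
	}
}
```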
```diff
@@ -568,6 +634,79 @@ func (s *Store) Close() error {
 	return s.db.Close()
 }
 
+// RoundToInterval rounds a time to the nearest sample interval boundary.
+func RoundToInterval(t time.Time, intervalSec int) time.Time {
+	if intervalSec <= 0 {
+		intervalSec = 300
+	}
+	epoch := t.Unix()
+	half := int64(intervalSec) / 2
+	rounded := ((epoch + half) / int64(intervalSec)) * int64(intervalSec)
+	return time.Unix(rounded, 0).UTC()
+}
+
+// MetricsData holds the fields to insert into observer_metrics.
+type MetricsData struct {
+	ObserverID  string
+	NoiseFloor  *float64
+	TxAirSecs   *int
+	RxAirSecs   *int
+	RecvErrors  *int
+	BatteryMv   *int
+	PacketsSent *int
+	PacketsRecv *int
+}
+
+// InsertMetrics inserts a metrics sample for an observer using ingestor wall clock.
+func (s *Store) InsertMetrics(data *MetricsData) error {
+	ts := RoundToInterval(time.Now().UTC(), s.sampleIntervalSec)
+	tsStr := ts.Format(time.RFC3339)
+
+	var nf, txAir, rxAir, recvErr, batt, pktSent, pktRecv interface{}
+	if data.NoiseFloor != nil {
+		nf = *data.NoiseFloor
+	}
+	if data.TxAirSecs != nil {
+		txAir = *data.TxAirSecs
+	}
+	if data.RxAirSecs != nil {
+		rxAir = *data.RxAirSecs
+	}
+	if data.RecvErrors != nil {
+		recvErr = *data.RecvErrors
+	}
+	if data.BatteryMv != nil {
+		batt = *data.BatteryMv
+	}
+	if data.PacketsSent != nil {
+		pktSent = *data.PacketsSent
+	}
+	if data.PacketsRecv != nil {
+		pktRecv = *data.PacketsRecv
+	}
+
+	_, err := s.stmtUpsertMetrics.Exec(data.ObserverID, tsStr, nf, txAir, rxAir, recvErr, batt, pktSent, pktRecv)
+	if err != nil {
+		s.Stats.WriteErrors.Add(1)
+		return fmt.Errorf("insert metrics: %w", err)
+	}
+	return nil
+}
+
+// PruneOldMetrics deletes observer_metrics rows older than retentionDays.
+func (s *Store) PruneOldMetrics(retentionDays int) (int64, error) {
+	cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format(time.RFC3339)
+	result, err := s.db.Exec(`DELETE FROM observer_metrics WHERE timestamp < ?`, cutoff)
+	if err != nil {
+		return 0, fmt.Errorf("prune metrics: %w", err)
+	}
+	n, _ := result.RowsAffected()
+	if n > 0 {
+		log.Printf("[metrics] Pruned %d rows older than %d days", n, retentionDays)
+	}
+	return n, nil
+}
+
 // Checkpoint forces a WAL checkpoint to release the WAL lock file,
 // preventing lock contention with a new process starting up.
 func (s *Store) Checkpoint() {
```
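The effect of rounding the wall clock before the INSERT OR REPLACE is that repeated status messages within one interval collapse onto the same (observer_id, timestamp) primary key, so the table holds at most one row per observer per interval. A standalone sketch of the rounding arithmetic (same half-up formula as RoundToInterval above):

```go
package main

import (
	"fmt"
	"time"
)

// roundToInterval mirrors the store's RoundToInterval: half-up rounding
// of the Unix epoch to the nearest intervalSec boundary.
func roundToInterval(t time.Time, intervalSec int64) time.Time {
	half := intervalSec / 2
	rounded := ((t.Unix() + half) / intervalSec) * intervalSec
	return time.Unix(rounded, 0).UTC()
}

func main() {
	// Two status messages a minute apart land in the same 300 s bucket,
	// so INSERT OR REPLACE overwrites rather than accumulating rows.
	a := time.Date(2026, 4, 5, 10, 1, 0, 0, time.UTC)
	b := time.Date(2026, 4, 5, 10, 2, 0, 0, time.UTC)
	fmt.Println(roundToInterval(a, 300)) // 2026-04-05 10:00:00 +0000 UTC
	fmt.Println(roundToInterval(b, 300)) // 2026-04-05 10:00:00 +0000 UTC
}
```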
```diff
@@ -1703,3 +1703,182 @@ func TestInsertTransmissionWithScoreAndDirection(t *testing.T) {
 }
 
+func ptrFloat(f float64) *float64 { return &f }
+func ptrInt(i int) *int           { return &i }
+
+func TestRoundToInterval(t *testing.T) {
+	tests := []struct {
+		input    time.Time
+		interval int
+		want     time.Time
+	}{
+		{time.Date(2026, 4, 5, 10, 2, 0, 0, time.UTC), 300, time.Date(2026, 4, 5, 10, 0, 0, 0, time.UTC)},
+		{time.Date(2026, 4, 5, 10, 3, 0, 0, time.UTC), 300, time.Date(2026, 4, 5, 10, 5, 0, 0, time.UTC)},
+		{time.Date(2026, 4, 5, 10, 2, 30, 0, time.UTC), 300, time.Date(2026, 4, 5, 10, 5, 0, 0, time.UTC)},
+		{time.Date(2026, 4, 5, 10, 5, 0, 0, time.UTC), 300, time.Date(2026, 4, 5, 10, 5, 0, 0, time.UTC)},
+		{time.Date(2026, 4, 5, 10, 7, 29, 0, time.UTC), 300, time.Date(2026, 4, 5, 10, 5, 0, 0, time.UTC)},
+	}
+	for _, tc := range tests {
+		got := RoundToInterval(tc.input, tc.interval)
+		if !got.Equal(tc.want) {
+			t.Errorf("RoundToInterval(%v, %d) = %v, want %v", tc.input, tc.interval, got, tc.want)
+		}
+	}
+}
+
+func TestInsertMetrics(t *testing.T) {
+	store, err := OpenStore(tempDBPath(t))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer store.Close()
+
+	nf := -112.5
+	txAir := 100
+	rxAir := 500
+	recvErr := 3
+	batt := 3720
+	data := &MetricsData{
+		ObserverID: "obs1",
+		NoiseFloor: &nf,
+		TxAirSecs:  &txAir,
+		RxAirSecs:  &rxAir,
+		RecvErrors: &recvErr,
+		BatteryMv:  &batt,
+	}
+
+	if err := store.InsertMetrics(data); err != nil {
+		t.Fatalf("InsertMetrics: %v", err)
+	}
+
+	// Verify insertion
+	var count int
+	store.db.QueryRow("SELECT COUNT(*) FROM observer_metrics WHERE observer_id = 'obs1'").Scan(&count)
+	if count != 1 {
+		t.Errorf("expected 1 row, got %d", count)
+	}
+
+	// Verify values
+	var gotNF float64
+	var gotTx, gotRx, gotErr, gotBatt int
+	store.db.QueryRow("SELECT noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv FROM observer_metrics WHERE observer_id = 'obs1'").Scan(&gotNF, &gotTx, &gotRx, &gotErr, &gotBatt)
+	if gotNF != -112.5 {
+		t.Errorf("noise_floor = %v, want -112.5", gotNF)
+	}
+	if gotTx != 100 {
+		t.Errorf("tx_air_secs = %d, want 100", gotTx)
+	}
+}
+
+func TestInsertMetricsIdempotent(t *testing.T) {
+	store, err := OpenStoreWithInterval(tempDBPath(t), 300)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer store.Close()
+
+	nf := -110.0
+	data := &MetricsData{ObserverID: "obs1", NoiseFloor: &nf}
+
+	// Insert twice — should result in 1 row (INSERT OR REPLACE)
+	store.InsertMetrics(data)
+	nf2 := -108.0
+	data.NoiseFloor = &nf2
+	store.InsertMetrics(data)
+
+	var count int
+	store.db.QueryRow("SELECT COUNT(*) FROM observer_metrics WHERE observer_id = 'obs1'").Scan(&count)
+	if count != 1 {
+		t.Errorf("expected 1 row (idempotent), got %d", count)
+	}
+
+	// Verify the value was replaced
+	var gotNF float64
+	store.db.QueryRow("SELECT noise_floor FROM observer_metrics WHERE observer_id = 'obs1'").Scan(&gotNF)
+	if gotNF != -108.0 {
+		t.Errorf("noise_floor = %v, want -108.0 (replaced)", gotNF)
+	}
+}
+
+func TestInsertMetricsNullFields(t *testing.T) {
+	store, err := OpenStore(tempDBPath(t))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer store.Close()
+
+	nf := -115.0
+	data := &MetricsData{
+		ObserverID: "obs1",
+		NoiseFloor: &nf,
+		// All other fields nil
+	}
+
+	if err := store.InsertMetrics(data); err != nil {
+		t.Fatalf("InsertMetrics with nulls: %v", err)
+	}
+
+	var gotNF sql.NullFloat64
+	var gotTx sql.NullInt64
+	store.db.QueryRow("SELECT noise_floor, tx_air_secs FROM observer_metrics WHERE observer_id = 'obs1'").Scan(&gotNF, &gotTx)
+	if !gotNF.Valid || gotNF.Float64 != -115.0 {
+		t.Errorf("noise_floor = %v, want -115.0", gotNF)
+	}
+	if gotTx.Valid {
+		t.Errorf("tx_air_secs should be NULL, got %v", gotTx.Int64)
+	}
+}
+
+func TestPruneOldMetrics(t *testing.T) {
+	store, err := OpenStore(tempDBPath(t))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer store.Close()
+
+	// Insert old and new metrics directly
+	oldTs := time.Now().UTC().AddDate(0, 0, -40).Format(time.RFC3339)
+	newTs := time.Now().UTC().Format(time.RFC3339)
+	store.db.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)", "obs1", oldTs, -110.0)
+	store.db.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)", "obs1", newTs, -112.0)
+
+	n, err := store.PruneOldMetrics(30)
+	if err != nil {
+		t.Fatalf("PruneOldMetrics: %v", err)
+	}
+	if n != 1 {
+		t.Errorf("pruned %d rows, want 1", n)
+	}
+
+	var count int
+	store.db.QueryRow("SELECT COUNT(*) FROM observer_metrics").Scan(&count)
+	if count != 1 {
+		t.Errorf("expected 1 row remaining, got %d", count)
+	}
+}
+
+func TestExtractObserverMetaNewFields(t *testing.T) {
+	msg := map[string]interface{}{
+		"model": "L1",
+		"stats": map[string]interface{}{
+			"noise_floor": -112.5,
+			"battery_mv":  3720.0,
+			"uptime_secs": 86400.0,
+			"tx_air_secs": 100.0,
+			"rx_air_secs": 500.0,
+			"recv_errors": 3.0,
+		},
+	}
+	meta := extractObserverMeta(msg)
+	if meta == nil {
+		t.Fatal("expected non-nil meta")
+	}
+	if meta.TxAirSecs == nil || *meta.TxAirSecs != 100 {
+		t.Errorf("TxAirSecs = %v, want 100", meta.TxAirSecs)
+	}
+	if meta.RxAirSecs == nil || *meta.RxAirSecs != 500 {
+		t.Errorf("RxAirSecs = %v, want 500", meta.RxAirSecs)
+	}
+	if meta.RecvErrors == nil || *meta.RecvErrors != 3 {
+		t.Errorf("RecvErrors = %v, want 3", meta.RecvErrors)
+	}
+}
```
+65
-1
@@ -53,7 +53,7 @@ func main() {
		log.Fatal("no MQTT sources configured — set mqttSources in config or MQTT_BROKER env var")
	}

	store, err := OpenStore(cfg.DBPath)
	store, err := OpenStoreWithInterval(cfg.DBPath, cfg.MetricsSampleInterval())
	if err != nil {
		log.Fatalf("db: %v", err)
	}
@@ -64,6 +64,10 @@ func main() {
	nodeDays := cfg.NodeDaysOrDefault()
	store.MoveStaleNodes(nodeDays)

	// Metrics retention: prune old metrics on startup
	metricsDays := cfg.MetricsRetentionDays()
	store.PruneOldMetrics(metricsDays)

	// Hourly ticker for node retention
	retentionTicker := time.NewTicker(1 * time.Hour)
	go func() {
@@ -72,6 +76,14 @@ func main() {
		}
	}()

	// Daily ticker for metrics retention (every 24h)
	metricsRetentionTicker := time.NewTicker(24 * time.Hour)
	go func() {
		for range metricsRetentionTicker.C {
			store.PruneOldMetrics(metricsDays)
		}
	}()

	// Periodic stats logging (every 5 minutes)
	statsTicker := time.NewTicker(5 * time.Minute)
	go func() {
@@ -163,6 +175,7 @@ func main() {

	log.Println("Shutting down...")
	retentionTicker.Stop()
	metricsRetentionTicker.Stop()
	statsTicker.Stop()
	store.LogStats() // final stats on shutdown
	for _, c := range clients {
@@ -215,6 +228,22 @@ func handleMessage(store *Store, tag string, source MQTTSource, m mqtt.Message,
		if err := store.UpsertObserver(observerID, name, iata, meta); err != nil {
			log.Printf("MQTT [%s] observer status error: %v", tag, err)
		}
		// Insert metrics sample from status message
		if meta != nil {
			metricsData := &MetricsData{
				ObserverID:  observerID,
				NoiseFloor:  meta.NoiseFloor,
				TxAirSecs:   meta.TxAirSecs,
				RxAirSecs:   meta.RxAirSecs,
				RecvErrors:  meta.RecvErrors,
				BatteryMv:   meta.BatteryMv,
				PacketsSent: meta.PacketsSent,
				PacketsRecv: meta.PacketsRecv,
			}
			if err := store.InsertMetrics(metricsData); err != nil {
				log.Printf("MQTT [%s] metrics insert error: %v", tag, err)
			}
		}
		log.Printf("MQTT [%s] status: %s (%s)", tag, firstNonEmpty(name, observerID), iata)
		return
	}
@@ -616,6 +645,41 @@ func extractObserverMeta(msg map[string]interface{}) *ObserverMeta {
			hasData = true
		}
	}
	if v := nestedOrTopLevel(stats, msg, "tx_air_secs"); v != nil {
		if f, ok := toFloat64(v); ok {
			iv := int(math.Round(f))
			meta.TxAirSecs = &iv
			hasData = true
		}
	}
	if v := nestedOrTopLevel(stats, msg, "rx_air_secs"); v != nil {
		if f, ok := toFloat64(v); ok {
			iv := int(math.Round(f))
			meta.RxAirSecs = &iv
			hasData = true
		}
	}
	if v := nestedOrTopLevel(stats, msg, "recv_errors"); v != nil {
		if f, ok := toFloat64(v); ok {
			iv := int(math.Round(f))
			meta.RecvErrors = &iv
			hasData = true
		}
	}
	if v := nestedOrTopLevel(stats, msg, "packets_sent"); v != nil {
		if f, ok := toFloat64(v); ok {
			iv := int(math.Round(f))
			meta.PacketsSent = &iv
			hasData = true
		}
	}
	if v := nestedOrTopLevel(stats, msg, "packets_recv"); v != nil {
		if f, ok := toFloat64(v); ok {
			iv := int(math.Round(f))
			meta.PacketsRecv = &iv
			hasData = true
		}
	}

	if !hasData {
		return nil

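// Editor's note: the five stat fields above repeat one extraction pattern.
// A hedged refactor sketch, not part of this diff (setIntStat is a
// hypothetical helper; nestedOrTopLevel and toFloat64 are the repo helpers
// already used above):
//
//	func setIntStat(stats, msg map[string]interface{}, key string, dst **int, hasData *bool) {
//		if v := nestedOrTopLevel(stats, msg, key); v != nil {
//			if f, ok := toFloat64(v); ok {
//				iv := int(math.Round(f))
//				*dst = &iv
//				*hasData = true
//			}
//		}
//	}
//
// Usage sketch: setIntStat(stats, msg, "tx_air_secs", &meta.TxAirSecs, &hasData)
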
@@ -0,0 +1,181 @@
package main

import (
	"encoding/json"
	"fmt"
	"testing"
)

// TestAdvertPubkeyTracking verifies that advertPubkeys is maintained
// incrementally during ingest and eviction, and that GetPerfStoreStats
// returns the correct count without per-request JSON parsing.
func TestAdvertPubkeyTracking(t *testing.T) {
	ps := NewPacketStore(nil, nil)
	ps.mu.Lock()

	// Helper to create an ADVERT StoreTx with a given pubkey.
	pt4 := 4
	mkAdvert := func(id int, pubkey string) *StoreTx {
		d := map[string]interface{}{"pubKey": pubkey}
		j, _ := json.Marshal(d)
		return &StoreTx{
			ID:          id,
			Hash:        fmt.Sprintf("hash%d", id),
			PayloadType: &pt4,
			DecodedJSON: string(j),
		}
	}

	// Add 3 adverts: 2 distinct pubkeys
	tx1 := mkAdvert(1, "pk_alpha")
	tx2 := mkAdvert(2, "pk_beta")
	tx3 := mkAdvert(3, "pk_alpha") // duplicate pubkey

	for _, tx := range []*StoreTx{tx1, tx2, tx3} {
		ps.packets = append(ps.packets, tx)
		ps.byHash[tx.Hash] = tx
		ps.byTxID[tx.ID] = tx
		ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
		ps.trackAdvertPubkey(tx)
	}
	ps.mu.Unlock()

	// GetPerfStoreStats should report 2 distinct pubkeys
	stats := ps.GetPerfStoreStats()
	indexes := stats["indexes"].(map[string]interface{})
	got := indexes["advertByObserver"].(int)
	if got != 2 {
		t.Errorf("advertByObserver = %d, want 2", got)
	}

	// GetPerfStoreStatsTyped should agree
	typed := ps.GetPerfStoreStatsTyped()
	if typed.Indexes.AdvertByObserver != 2 {
		t.Errorf("typed AdvertByObserver = %d, want 2", typed.Indexes.AdvertByObserver)
	}

	// Evict tx3 (pk_alpha duplicate) — count should stay 2
	ps.mu.Lock()
	ps.untrackAdvertPubkey(tx3)
	ps.mu.Unlock()

	stats2 := ps.GetPerfStoreStats()
	idx2 := stats2["indexes"].(map[string]interface{})
	if idx2["advertByObserver"].(int) != 2 {
		t.Errorf("after evicting duplicate: advertByObserver = %d, want 2", idx2["advertByObserver"].(int))
	}

	// Evict tx1 (last pk_alpha) — count should drop to 1
	ps.mu.Lock()
	ps.untrackAdvertPubkey(tx1)
	ps.mu.Unlock()

	stats3 := ps.GetPerfStoreStats()
	idx3 := stats3["indexes"].(map[string]interface{})
	if idx3["advertByObserver"].(int) != 1 {
		t.Errorf("after evicting last pk_alpha: advertByObserver = %d, want 1", idx3["advertByObserver"].(int))
	}

	// Evict tx2 (last remaining) — count should be 0
	ps.mu.Lock()
	ps.untrackAdvertPubkey(tx2)
	ps.mu.Unlock()

	stats4 := ps.GetPerfStoreStats()
	idx4 := stats4["indexes"].(map[string]interface{})
	if idx4["advertByObserver"].(int) != 0 {
		t.Errorf("after evicting all: advertByObserver = %d, want 0", idx4["advertByObserver"].(int))
	}
}

// TestAdvertPubkeyPublicKeyField tests the "public_key" JSON field variant.
func TestAdvertPubkeyPublicKeyField(t *testing.T) {
	ps := NewPacketStore(nil, nil)
	ps.mu.Lock()
	pt4 := 4
	d, _ := json.Marshal(map[string]interface{}{"public_key": "pk_legacy"})
	tx := &StoreTx{ID: 1, Hash: "h1", PayloadType: &pt4, DecodedJSON: string(d)}
	ps.trackAdvertPubkey(tx)
	ps.mu.Unlock()

	stats := ps.GetPerfStoreStats()
	idx := stats["indexes"].(map[string]interface{})
	if idx["advertByObserver"].(int) != 1 {
		t.Errorf("public_key field: advertByObserver = %d, want 1", idx["advertByObserver"].(int))
	}
}

// TestAdvertPubkeyNonAdvert ensures non-ADVERT packets don't affect the count.
func TestAdvertPubkeyNonAdvert(t *testing.T) {
	ps := NewPacketStore(nil, nil)
	ps.mu.Lock()
	pt2 := 2
	d, _ := json.Marshal(map[string]interface{}{"pubKey": "pk_text"})
	tx := &StoreTx{ID: 1, Hash: "h1", PayloadType: &pt2, DecodedJSON: string(d)}
	ps.trackAdvertPubkey(tx)
	ps.mu.Unlock()

	stats := ps.GetPerfStoreStats()
	idx := stats["indexes"].(map[string]interface{})
	if idx["advertByObserver"].(int) != 0 {
		t.Errorf("non-ADVERT should not be tracked: advertByObserver = %d, want 0", idx["advertByObserver"].(int))
	}
}
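// Editor's note: a minimal sketch of the incremental tracking these tests
// exercise, assuming a refcount map (advertPubkeys map[string]int on
// PacketStore) and a hypothetical advertPubkeyOf helper; the repo's actual
// implementation may differ.
//
//	func (ps *PacketStore) trackAdvertPubkey(tx *StoreTx) {
//		if pk := advertPubkeyOf(tx); pk != "" { // ADVERTs only; pubKey or public_key
//			ps.advertPubkeys[pk]++
//		}
//	}
//
//	func (ps *PacketStore) untrackAdvertPubkey(tx *StoreTx) {
//		if pk := advertPubkeyOf(tx); pk != "" {
//			if ps.advertPubkeys[pk]--; ps.advertPubkeys[pk] <= 0 {
//				delete(ps.advertPubkeys, pk)
//			}
//		}
//	}
//
// len(ps.advertPubkeys) then yields the distinct-pubkey count in O(1).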

// BenchmarkGetPerfStoreStats benchmarks the perf stats endpoint with many adverts.
// Before the fix, this did O(N) JSON unmarshals per call.
// After the fix, it's O(1) — just len(map).
func BenchmarkGetPerfStoreStats(b *testing.B) {
	ps := NewPacketStore(nil, nil)
	ps.mu.Lock()
	pt4 := 4
	for i := 0; i < 5000; i++ {
		pk := fmt.Sprintf("pk_%04d", i%200) // 200 distinct pubkeys
		d, _ := json.Marshal(map[string]interface{}{"pubKey": pk})
		tx := &StoreTx{
			ID:          i + 1,
			Hash:        fmt.Sprintf("hash%d", i+1),
			PayloadType: &pt4,
			DecodedJSON: string(d),
		}
		ps.packets = append(ps.packets, tx)
		ps.byHash[tx.Hash] = tx
		ps.byTxID[tx.ID] = tx
		ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
		ps.trackAdvertPubkey(tx)
	}
	ps.mu.Unlock()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ps.GetPerfStoreStats()
	}
}

// BenchmarkGetPerfStoreStatsTyped benchmarks the typed variant.
func BenchmarkGetPerfStoreStatsTyped(b *testing.B) {
	ps := NewPacketStore(nil, nil)
	ps.mu.Lock()
	pt4 := 4
	for i := 0; i < 5000; i++ {
		pk := fmt.Sprintf("pk_%04d", i%200)
		d, _ := json.Marshal(map[string]interface{}{"pubKey": pk})
		tx := &StoreTx{
			ID:          i + 1,
			Hash:        fmt.Sprintf("hash%d", i+1),
			PayloadType: &pt4,
			DecodedJSON: string(d),
		}
		ps.packets = append(ps.packets, tx)
		ps.byHash[tx.Hash] = tx
		ps.byTxID[tx.ID] = tx
		ps.byPayloadType[4] = append(ps.byPayloadType[4], tx)
		ps.trackAdvertPubkey(tx)
	}
	ps.mu.Unlock()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ps.GetPerfStoreStatsTyped()
	}
}
@@ -16,6 +16,7 @@ func newTestStore(t *testing.T) *PacketStore {
		distCache:    make(map[string]*cachedResult),
		subpathCache: make(map[string]*cachedResult),
		rfCacheTTL:   15 * time.Second,
		invCooldown:  10 * time.Second,
	}
}

@@ -169,3 +170,164 @@ func TestInvalidateCachesFor_NoFlags(t *testing.T) {
		}
	}
}

// TestInvalidationRateLimited verifies that rapid ingest cycles don't clear
// caches immediately — they accumulate dirty flags during the cooldown period
// and apply them on the next call after cooldown expires (fixes #533).
func TestInvalidationRateLimited(t *testing.T) {
	s := newTestStore(t)
	s.invCooldown = 100 * time.Millisecond // short cooldown for testing

	// First invalidation should go through immediately
	populateAllCaches(s)
	s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
	state := cachePopulated(s)
	if state["rf"] {
		t.Error("rf cache should be cleared on first invalidation")
	}
	if !state["topo"] {
		t.Error("topo cache should survive (no path changes)")
	}

	// Repopulate and call again within cooldown — should NOT clear
	populateAllCaches(s)
	s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})
	state = cachePopulated(s)
	if !state["rf"] {
		t.Error("rf cache should survive during cooldown period")
	}

	// Wait for cooldown to expire
	time.Sleep(150 * time.Millisecond)

	// Next call should apply accumulated + current flags
	populateAllCaches(s)
	s.invalidateCachesFor(cacheInvalidation{hasNewPaths: true})
	state = cachePopulated(s)
	if state["rf"] {
		t.Error("rf cache should be cleared (pending from cooldown)")
	}
	if state["topo"] {
		t.Error("topo cache should be cleared (current call has hasNewPaths)")
	}
	if !state["hash"] {
		t.Error("hash cache should survive (no transmission changes)")
	}
}

// TestInvalidationCooldownAccumulatesFlags verifies that multiple calls during
// cooldown merge their flags correctly.
func TestInvalidationCooldownAccumulatesFlags(t *testing.T) {
	s := newTestStore(t)
	s.invCooldown = 200 * time.Millisecond

	// Initial invalidation (goes through, starts cooldown)
	s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})

	// Several calls during cooldown with different flags
	s.invalidateCachesFor(cacheInvalidation{hasNewPaths: true})
	s.invalidateCachesFor(cacheInvalidation{hasNewTransmissions: true})
	s.invalidateCachesFor(cacheInvalidation{hasChannelData: true})

	// Verify pending has all flags
	s.cacheMu.Lock()
	if s.pendingInv == nil {
		t.Fatal("pendingInv should not be nil during cooldown")
	}
	if !s.pendingInv.hasNewPaths || !s.pendingInv.hasNewTransmissions || !s.pendingInv.hasChannelData {
		t.Error("all flags should be accumulated in pendingInv")
	}
	// hasNewObservations was applied immediately, not accumulated
	if s.pendingInv.hasNewObservations {
		t.Error("hasNewObservations was already applied, should not be in pending")
	}
	s.cacheMu.Unlock()

	// Wait for cooldown, then trigger — all accumulated flags should apply
	time.Sleep(250 * time.Millisecond)
	populateAllCaches(s)
	s.invalidateCachesFor(cacheInvalidation{}) // empty trigger
	state := cachePopulated(s)

	// Pending had paths, transmissions, channels — all those caches should clear
	if state["topo"] {
		t.Error("topo should be cleared (pending hasNewPaths)")
	}
	if state["hash"] {
		t.Error("hash should be cleared (pending hasNewTransmissions)")
	}
	if state["chan"] {
		t.Error("chan should be cleared (pending hasChannelData)")
	}
}

// TestEvictionBypassesCooldown verifies eviction always clears immediately.
func TestEvictionBypassesCooldown(t *testing.T) {
	s := newTestStore(t)
	s.invCooldown = 10 * time.Second // long cooldown

	// Start cooldown
	s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})

	// Eviction during cooldown should still clear everything
	populateAllCaches(s)
	s.invalidateCachesFor(cacheInvalidation{eviction: true})
	state := cachePopulated(s)
	for name, has := range state {
		if has {
			t.Errorf("%s cache should be cleared on eviction even during cooldown", name)
		}
	}
	// pendingInv should be cleared
	s.cacheMu.Lock()
	if s.pendingInv != nil {
		t.Error("pendingInv should be nil after eviction")
	}
	s.cacheMu.Unlock()
}
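// Editor's note: a hedged sketch of the cooldown gate the tests above
// exercise. The lastInv field and the merge/clear helpers are assumptions
// for illustration, not names confirmed by this diff.
//
//	func (s *PacketStore) invalidateCachesFor(inv cacheInvalidation) {
//		s.cacheMu.Lock()
//		defer s.cacheMu.Unlock()
//		if inv.eviction { // eviction bypasses the cooldown entirely
//			s.clearAllCachesLocked()
//			s.pendingInv = nil
//			s.lastInv = time.Now()
//			return
//		}
//		if time.Since(s.lastInv) < s.invCooldown {
//			if s.pendingInv == nil {
//				s.pendingInv = &cacheInvalidation{}
//			}
//			s.pendingInv.merge(inv) // accumulate flags, clear nothing yet
//			return
//		}
//		if s.pendingInv != nil { // cooldown expired: apply pending + current
//			inv.merge(*s.pendingInv)
//			s.pendingInv = nil
//		}
//		s.applyFlagsLocked(inv)
//		s.lastInv = time.Now()
//	}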

// BenchmarkCacheHitDuringIngestion simulates rapid ingestion and verifies
// that cache hits now occur thanks to rate-limited invalidation.
func BenchmarkCacheHitDuringIngestion(b *testing.B) {
	s := &PacketStore{
		rfCache:      make(map[string]*cachedResult),
		topoCache:    make(map[string]*cachedResult),
		hashCache:    make(map[string]*cachedResult),
		chanCache:    make(map[string]*cachedResult),
		distCache:    make(map[string]*cachedResult),
		subpathCache: make(map[string]*cachedResult),
		rfCacheTTL:   15 * time.Second,
		invCooldown:  50 * time.Millisecond,
	}

	// Trigger first invalidation to start cooldown timer
	s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})

	var hits, misses int64
	for i := 0; i < b.N; i++ {
		// Populate cache (simulates an analytics query filling the cache)
		s.cacheMu.Lock()
		s.rfCache["global"] = &cachedResult{
			data:      map[string]interface{}{"test": true},
			expiresAt: time.Now().Add(time.Hour),
		}
		s.cacheMu.Unlock()

		// Simulate rapid ingest invalidation (should be rate-limited)
		s.invalidateCachesFor(cacheInvalidation{hasNewObservations: true})

		// Check if cache survived the invalidation
		s.cacheMu.Lock()
		if len(s.rfCache) > 0 {
			hits++
		} else {
			misses++
		}
		s.cacheMu.Unlock()
	}

	if hits == 0 {
		b.Errorf("expected cache hits > 0 with rate-limited invalidation, got 0 hits / %d misses", misses)
	}
	b.ReportMetric(float64(hits)/float64(hits+misses)*100, "hit%")
}

+11
-2
@@ -69,8 +69,17 @@ type PacketStoreConfig struct {
type GeoFilterConfig = geofilter.Config

type RetentionConfig struct {
	NodeDays   int `json:"nodeDays"`
	PacketDays int `json:"packetDays"`
	NodeDays    int `json:"nodeDays"`
	PacketDays  int `json:"packetDays"`
	MetricsDays int `json:"metricsDays"`
}

// MetricsRetentionDays returns configured metrics retention or 30 days default.
func (c *Config) MetricsRetentionDays() int {
	if c.Retention != nil && c.Retention.MetricsDays > 0 {
		return c.Retention.MetricsDays
	}
	return 30
}

@@ -1,6 +1,7 @@
package main

import (
	"bytes"
	"database/sql"
	"encoding/json"
	"fmt"
@@ -428,6 +429,49 @@ func TestMaxTransmissionID(t *testing.T) {
	})
}

// --- MaxTransmissionID incremental tracking ---

func TestMaxTransmissionIDIncremental(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	store.Load()

	maxTx := store.MaxTransmissionID()
	maxObs := store.MaxObservationID()

	if maxTx <= 0 {
		t.Fatalf("expected maxTx > 0 after Load, got %d", maxTx)
	}
	if maxObs <= 0 {
		t.Fatalf("expected maxObs > 0 after Load, got %d", maxObs)
	}

	// Verify incremental field matches brute-force iteration
	store.mu.RLock()
	bruteMaxTx := 0
	for id := range store.byTxID {
		if id > bruteMaxTx {
			bruteMaxTx = id
		}
	}
	bruteMaxObs := 0
	for id := range store.byObsID {
		if id > bruteMaxObs {
			bruteMaxObs = id
		}
	}
	store.mu.RUnlock()

	if maxTx != bruteMaxTx {
		t.Errorf("maxTxID mismatch: incremental=%d brute=%d", maxTx, bruteMaxTx)
	}
	if maxObs != bruteMaxObs {
		t.Errorf("maxObsID mismatch: incremental=%d brute=%d", maxObs, bruteMaxObs)
	}
}

// --- Route handler DB fallback (no store) ---

func TestHandleBulkHealthNoStore(t *testing.T) {
@@ -770,6 +814,56 @@ func TestPrefixMapResolve(t *testing.T) {
	})
}

func TestPrefixMapCap(t *testing.T) {
	// 16-char pubkey — longer than maxPrefixLen
	nodes := []nodeInfo{
		{PublicKey: "aabbccdd11223344", Name: "LongKey"},
		{PublicKey: "eeff0011", Name: "ShortKey"}, // exactly 8 chars
	}
	pm := buildPrefixMap(nodes)

	t.Run("short prefixes still work", func(t *testing.T) {
		n := pm.resolve("aabb")
		if n == nil || n.Name != "LongKey" {
			t.Errorf("expected LongKey for short prefix, got %v", n)
		}
	})

	t.Run("full pubkey exact match works", func(t *testing.T) {
		n := pm.resolve("aabbccdd11223344")
		if n == nil || n.Name != "LongKey" {
			t.Errorf("expected LongKey for full key, got %v", n)
		}
	})

	t.Run("intermediate prefix beyond cap returns nil", func(t *testing.T) {
		// 10-char prefix — beyond maxPrefixLen but not full key
		n := pm.resolve("aabbccdd11")
		if n != nil {
			t.Errorf("expected nil for intermediate prefix beyond cap, got %v", n.Name)
		}
	})

	t.Run("short key within cap has all prefixes", func(t *testing.T) {
		for l := 2; l <= 8; l++ {
			pfx := "eeff0011"[:l]
			n := pm.resolve(pfx)
			if n == nil || n.Name != "ShortKey" {
				t.Errorf("prefix %q: expected ShortKey, got %v", pfx, n)
			}
		}
	})

	t.Run("map size is capped", func(t *testing.T) {
		// LongKey: 7 prefix entries (2..8) + 1 full key = 8
		// ShortKey: 7 prefix entries (2..8), no full key entry (len == maxPrefixLen) = 7
		// No overlapping prefixes between the two nodes → 8 + 7 = 15 unique map keys
		if len(pm.m) != 15 {
			t.Errorf("expected 15 map entries (8 for LongKey + 7 for ShortKey), got %d", len(pm.m))
		}
	})
}

// --- pathLen ---

func TestPathLen(t *testing.T) {
@@ -1333,6 +1427,40 @@ func TestGetNodeLocations(t *testing.T) {
	}
}

// --- GetNodeLocationsByKeys ---

func TestGetNodeLocationsByKeys(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)

	// Query with a known key
	pk := "aabbccdd11223344"
	locs := db.GetNodeLocationsByKeys([]string{pk})
	if len(locs) != 1 {
		t.Errorf("expected 1 location, got %d", len(locs))
	}
	if entry, ok := locs[strings.ToLower(pk)]; ok {
		if entry["lat"] == nil {
			t.Error("expected non-nil lat")
		}
	} else {
		t.Error("expected node location for test repeater")
	}

	// Query with no keys returns empty map
	empty := db.GetNodeLocationsByKeys([]string{})
	if len(empty) != 0 {
		t.Errorf("expected 0 locations for empty keys, got %d", len(empty))
	}

	// Query with unknown key returns empty map
	unknown := db.GetNodeLocationsByKeys([]string{"nonexistent"})
	if len(unknown) != 0 {
		t.Errorf("expected 0 locations for unknown key, got %d", len(unknown))
	}
}

// --- Store edge cases ---

func TestStoreQueryPacketsEdgeCases(t *testing.T) {
@@ -1906,6 +2034,48 @@ func TestTxToMap(t *testing.T) {
	}
}

func TestTxToMapLazyObservations(t *testing.T) {
	snr := 10.5
	rssi := -90.0
	tx := &StoreTx{
		ID:   1,
		Hash: "abc",
		Observations: []*StoreObs{
			{ID: 10, ObserverID: "obs1", ObserverName: "O1", SNR: &snr, RSSI: &rssi, Timestamp: "2025-01-01"},
			{ID: 11, ObserverID: "obs2", ObserverName: "O2", SNR: &snr, RSSI: &rssi, Timestamp: "2025-01-02"},
		},
	}

	// Without flag: no observations key
	m := txToMap(tx)
	if _, ok := m["observations"]; ok {
		t.Error("txToMap without includeObservations should not include observations key")
	}

	// With false: no observations key
	m = txToMap(tx, false)
	if _, ok := m["observations"]; ok {
		t.Error("txToMap(tx, false) should not include observations key")
	}

	// With true: observations included
	m = txToMap(tx, true)
	obs, ok := m["observations"]
	if !ok {
		t.Fatal("txToMap(tx, true) should include observations key")
	}
	obsList, ok := obs.([]map[string]interface{})
	if !ok {
		t.Fatal("observations should be []map[string]interface{}")
	}
	if len(obsList) != 2 {
		t.Errorf("expected 2 observations, got %d", len(obsList))
	}
	if obsList[0]["observer_id"] != "obs1" {
		t.Errorf("expected observer_id obs1, got %v", obsList[0]["observer_id"])
	}
}
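// Editor's note: the lazy-observations calls above imply a variadic flag;
// a hedged signature sketch (the body is not shown in this diff):
//
//	func txToMap(tx *StoreTx, includeObservations ...bool) map[string]interface{} {
//		include := len(includeObservations) > 0 && includeObservations[0]
//		// ... build the map; attach "observations" only when include is true
//	}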

// --- filterTxSlice ---

func TestFilterTxSlice(t *testing.T) {
@@ -2099,6 +2269,84 @@ func TestSubpathPrecomputedIndex(t *testing.T) {
	}
}

func TestSubpathTxIndexPopulated(t *testing.T) {
	db := setupRichTestDB(t)
	defer db.Close()
	store := NewPacketStore(db, nil)
	store.Load()

	// spTxIndex must be populated alongside spIndex
	if len(store.spTxIndex) == 0 {
		t.Fatal("expected spTxIndex to be populated after Load()")
	}

	// Every key in spIndex must also exist in spTxIndex with matching count
	for key, count := range store.spIndex {
		txs, ok := store.spTxIndex[key]
		if !ok {
			t.Errorf("spTxIndex missing key %q that exists in spIndex", key)
			continue
		}
		if len(txs) != count {
			t.Errorf("spTxIndex[%q] has %d txs, spIndex count is %d", key, len(txs), count)
		}
	}

	// GetSubpathDetail should return correct match count via indexed lookup
	detail := store.GetSubpathDetail([]string{"eeff", "0011"})
	if detail == nil {
		t.Fatal("expected non-nil detail for existing subpath")
	}
	matches, _ := detail["totalMatches"].(int)
	if matches != 1 {
		t.Errorf("totalMatches = %d, want 1", matches)
	}

	// Non-existent subpath should return 0 matches
	detail2 := store.GetSubpathDetail([]string{"zzzz", "yyyy"})
	if detail2 == nil {
		t.Fatal("expected non-nil result even for non-existent subpath")
	}
	matches2, _ := detail2["totalMatches"].(int)
	if matches2 != 0 {
		t.Errorf("totalMatches for non-existent subpath = %d, want 0", matches2)
	}
}

func TestSubpathDetailMixedCaseHops(t *testing.T) {
	db := setupRichTestDB(t)
	defer db.Close()
	store := NewPacketStore(db, nil)
	store.Load()

	// Query with lowercase hops to establish baseline
	lower := store.GetSubpathDetail([]string{"eeff", "0011"})
	if lower == nil {
		t.Fatal("expected non-nil detail for lowercase subpath")
	}
	lowerMatches, _ := lower["totalMatches"].(int)
	if lowerMatches == 0 {
		t.Fatal("expected >0 matches for lowercase subpath")
	}

	// Query with mixed-case hops — must return the same results (case-insensitive)
	mixed := store.GetSubpathDetail([]string{"EEFF", "0011"})
	if mixed == nil {
		t.Fatal("expected non-nil detail for mixed-case subpath")
	}
	mixedMatches, _ := mixed["totalMatches"].(int)
	if mixedMatches != lowerMatches {
		t.Errorf("mixed-case totalMatches = %d, want %d (same as lowercase)", mixedMatches, lowerMatches)
	}

	// All-uppercase should also match
	upper := store.GetSubpathDetail([]string{"EEFF", "0011"})
	upperMatches, _ := upper["totalMatches"].(int)
	if upperMatches != lowerMatches {
		t.Errorf("uppercase totalMatches = %d, want %d", upperMatches, lowerMatches)
	}
}

func TestStoreGetAnalyticsRFCacheHit(t *testing.T) {
	db := setupRichTestDB(t)
	defer db.Close()
@@ -3716,6 +3964,71 @@ func TestGetChannelMessagesAfterIngest(t *testing.T) {
	}
}

// --- resolveRegionObservers caching ---

func TestResolveRegionObserversCaching(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)

	store := &PacketStore{db: db}

	// First call should populate cache.
	obs1 := store.resolveRegionObservers("SJC")
	if obs1 == nil || len(obs1) == 0 {
		t.Fatal("expected observer IDs for SJC on first call")
	}

	// Second call should return cached result (same pointer).
	obs2 := store.resolveRegionObservers("SJC")
	if len(obs2) != len(obs1) {
		t.Errorf("cached result differs: got %d, want %d", len(obs2), len(obs1))
	}

	// Non-existent region should return nil even from cache.
	obs3 := store.resolveRegionObservers("NONEXIST")
	if obs3 != nil {
		t.Errorf("expected nil for NONEXIST, got %v", obs3)
	}

	// Verify cache fields are set.
	if store.regionObsCache == nil {
		t.Error("regionObsCache should be non-nil after calls")
	}
	if store.regionObsCacheTime.IsZero() {
		t.Error("regionObsCacheTime should be set")
	}
}

func TestResolveRegionObserversCacheMissNewRegion(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)

	store := &PacketStore{db: db}

	// Populate cache with SJC.
	obs1 := store.resolveRegionObservers("SJC")
	if obs1 == nil || len(obs1) == 0 {
		t.Fatal("expected observer IDs for SJC on first call")
	}

	// Cache is now valid. Request a different region that exists in DB.
	// Before the fix, this would return nil from the map lookup instead of
	// fetching from DB, silently returning "no observers" for up to 30s.
	obs2 := store.resolveRegionObservers("LAX")
	// LAX may or may not have data in the test DB, but the key point is:
	// a non-existent region should be fetched (not just nil-returned).
	// Verify the region key was cached (even if empty).
	store.regionObsMu.Lock()
	_, cached := store.regionObsCache["LAX"]
	store.regionObsMu.Unlock()
	if !cached {
		t.Error("LAX should be cached after resolveRegionObservers call, even if empty")
	}
	_ = obs2
}

func TestIndexByNodePreCheck(t *testing.T) {
	store := &PacketStore{
		byNode: make(map[string][]*StoreTx),
@@ -3811,3 +4124,218 @@ func BenchmarkIndexByNode(b *testing.B) {
		}
	})
}

// --- Multi-observer comma-separated filter tests ---

func TestTransmissionsForObserverMultiCSV(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	store.Load()

	t.Run("comma-separated returns union via index", func(t *testing.T) {
		result := store.transmissionsForObserver("obs1,obs2", nil)
		if len(result) == 0 {
			t.Fatal("expected results for obs1,obs2")
		}
		// obs1 has transmissions 1,2,3; obs2 has transmission 1
		// Union should include all unique transmissions
		obs1Only := store.transmissionsForObserver("obs1", nil)
		obs2Only := store.transmissionsForObserver("obs2", nil)
		if len(result) < len(obs1Only) || len(result) < len(obs2Only) {
			t.Errorf("union (%d) should be >= each individual set (obs1=%d, obs2=%d)",
				len(result), len(obs1Only), len(obs2Only))
		}
	})

	t.Run("comma-separated with spaces via index", func(t *testing.T) {
		result := store.transmissionsForObserver("obs1, obs2", nil)
		if len(result) == 0 {
			t.Fatal("expected results for 'obs1, obs2' (with space)")
		}
		noSpace := store.transmissionsForObserver("obs1,obs2", nil)
		if len(result) != len(noSpace) {
			t.Errorf("with-space (%d) should equal no-space (%d)", len(result), len(noSpace))
		}
	})

	t.Run("comma-separated returns union via filter path", func(t *testing.T) {
		allTx := store.packets
		result := store.transmissionsForObserver("obs1,obs2", allTx)
		if len(result) == 0 {
			t.Fatal("expected results for obs1,obs2 via filter path")
		}
	})

	t.Run("comma-separated with spaces via filter path", func(t *testing.T) {
		allTx := store.packets
		withSpace := store.transmissionsForObserver("obs1, obs2", allTx)
		noSpace := store.transmissionsForObserver("obs1,obs2", allTx)
		if len(withSpace) != len(noSpace) {
			t.Errorf("filter path: with-space (%d) should equal no-space (%d)", len(withSpace), len(noSpace))
		}
	})
}

func TestBuildTransmissionWhereMultiObserver(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)

	t.Run("comma-separated produces IN clause", func(t *testing.T) {
		q := PacketQuery{Observer: "obs1,obs2"}
		where, args := db.buildTransmissionWhere(q)
		if len(where) != 1 {
			t.Fatalf("expected 1 WHERE clause, got %d", len(where))
		}
		clause := where[0]
		if !strings.Contains(clause, "IN (?,?)") {
			t.Errorf("expected IN (?,?) in clause, got: %s", clause)
		}
		if len(args) != 2 {
			t.Fatalf("expected 2 args, got %d", len(args))
		}
		if args[0] != "obs1" || args[1] != "obs2" {
			t.Errorf("expected [obs1, obs2], got %v", args)
		}
	})

	t.Run("comma-separated with spaces trims IDs", func(t *testing.T) {
		q := PacketQuery{Observer: "obs1, obs2"}
		_, args := db.buildTransmissionWhere(q)
		if len(args) != 2 {
			t.Fatalf("expected 2 args, got %d", len(args))
		}
		if args[0] != "obs1" || args[1] != "obs2" {
			t.Errorf("expected trimmed [obs1, obs2], got %v", args)
		}
	})

	t.Run("single observer still works", func(t *testing.T) {
		q := PacketQuery{Observer: "obs1"}
		where, args := db.buildTransmissionWhere(q)
		if len(where) != 1 {
			t.Fatalf("expected 1 WHERE clause, got %d", len(where))
		}
		if !strings.Contains(where[0], "IN (?)") {
			t.Errorf("expected IN (?) for single observer, got: %s", where[0])
		}
		if len(args) != 1 || args[0] != "obs1" {
			t.Errorf("expected [obs1], got %v", args)
		}
	})
}

// --- Distance index incremental update (#365, replaces debounce #557) ---

func TestDistanceIncrementalUpdate(t *testing.T) {
	db := setupTestDB(t)
	defer db.Close()
	seedTestData(t, db)
	store := NewPacketStore(db, nil)
	store.Load()

	// Record initial distance index size.
	initialHops := len(store.distHops)
	initialPaths := len(store.distPaths)

	// Insert a new observation with a different path to trigger an incremental update.
	maxObsID := db.GetMaxObservationID()
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (1, 2, 5.0, -100, '["xx","yy","zz"]', ?)`, time.Now().Unix())

	store.IngestNewObservations(maxObsID, 500)

	// Distance index should have been updated incrementally (sizes may differ
	// if the new path resolves differently, but should not panic or corrupt).
	_ = len(store.distHops)
	_ = len(store.distPaths)

	// Insert another observation with yet another path.
	maxObsID = db.GetMaxObservationID()
	db.conn.Exec(`INSERT INTO observations (transmission_id, observer_idx, snr, rssi, path_json, timestamp)
		VALUES (1, 2, 7.0, -95, '["aa","bb","cc","dd"]', ?)`, time.Now().Unix())

	store.IngestNewObservations(maxObsID, 500)

	// Verify the index is still coherent (no duplicates for the same tx).
	txSeen := make(map[int]int)
	for _, r := range store.distPaths {
		if r.tx != nil {
			txSeen[r.tx.ID]++
		}
	}
	for txID, count := range txSeen {
		if count > 1 {
			t.Errorf("distPaths has %d entries for tx %d (expected at most 1)", count, txID)
		}
	}

	t.Logf("Distance index: %d→%d hops, %d→%d paths (incremental)",
		initialHops, len(store.distHops), initialPaths, len(store.distPaths))
}

func TestHandleBatchObservations(t *testing.T) {
	_, router := setupNoStoreServer(t)

	t.Run("empty hashes returns empty results", func(t *testing.T) {
		body := strings.NewReader(`{"hashes":[]}`)
		req := httptest.NewRequest("POST", "/api/packets/observations", body)
		req.Header.Set("Content-Type", "application/json")
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		if w.Code != 200 {
			t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
		}
		var resp map[string]interface{}
		json.Unmarshal(w.Body.Bytes(), &resp)
		results, ok := resp["results"].(map[string]interface{})
		if !ok || len(results) != 0 {
			t.Fatalf("expected empty results map, got %v", resp)
		}
	})

	t.Run("invalid JSON returns 400", func(t *testing.T) {
		body := strings.NewReader(`not json`)
		req := httptest.NewRequest("POST", "/api/packets/observations", body)
		req.Header.Set("Content-Type", "application/json")
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		if w.Code != 400 {
			t.Fatalf("expected 400, got %d", w.Code)
		}
	})

	t.Run("too many hashes returns 400", func(t *testing.T) {
		hashes := make([]string, 201)
		for i := range hashes {
			hashes[i] = fmt.Sprintf("hash%d", i)
		}
		data, _ := json.Marshal(map[string][]string{"hashes": hashes})
		req := httptest.NewRequest("POST", "/api/packets/observations", bytes.NewReader(data))
		req.Header.Set("Content-Type", "application/json")
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		if w.Code != 400 {
			t.Fatalf("expected 400, got %d", w.Code)
		}
	})

	t.Run("valid hashes with no store returns empty results", func(t *testing.T) {
		body := strings.NewReader(`{"hashes":["abc123","def456"]}`)
		req := httptest.NewRequest("POST", "/api/packets/observations", body)
		req.Header.Set("Content-Type", "application/json")
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		if w.Code != 200 {
			t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
		}
		var resp map[string]interface{}
		json.Unmarshal(w.Body.Bytes(), &resp)
		_, ok := resp["results"].(map[string]interface{})
		if !ok {
			t.Fatalf("expected results map, got %v", resp)
		}
	})
}

+392
-10
@@ -15,9 +15,10 @@ import (

// DB wraps a read-only connection to the MeshCore SQLite database.
type DB struct {
	conn *sql.DB
	path string // filesystem path to the database file
	isV3 bool   // v3 schema: observer_idx in observations (vs observer_id in v2)
	conn            *sql.DB
	path            string // filesystem path to the database file
	isV3            bool   // v3 schema: observer_idx in observations (vs observer_id in v2)
	hasResolvedPath bool   // observations table has resolved_path column
}

// OpenDB opens a read-only SQLite connection with WAL mode.
@@ -61,9 +62,13 @@ func (db *DB) detectSchema() {
		var colType sql.NullString
		var notNull, pk int
		var dflt sql.NullString
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil && colName == "observer_idx" {
			db.isV3 = true
			return
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil {
			if colName == "observer_idx" {
				db.isV3 = true
			}
			if colName == "resolved_path" {
				db.hasResolvedPath = true
			}
		}
	}
}
@@ -372,7 +377,8 @@ type PacketQuery struct {
	Until  string
	Region string
	Node   string
	Order  string // ASC or DESC
	Order              string // ASC or DESC
	ExpandObservations bool   // when true, include observation sub-maps in txToMap output
}

// PacketResult wraps paginated packet list.
@@ -608,12 +614,17 @@ func (db *DB) buildTransmissionWhere(q PacketQuery) ([]string, []interface{}) {
		args = append(args, "%"+pk+"%")
	}
	if q.Observer != "" {
		ids := strings.Split(q.Observer, ",")
		placeholders := strings.Repeat("?,", len(ids))
		placeholders = placeholders[:len(placeholders)-1]
		if db.isV3 {
			where = append(where, "EXISTS (SELECT 1 FROM observations oi JOIN observers obi ON obi.rowid = oi.observer_idx WHERE oi.transmission_id = t.id AND obi.id = ?)")
			where = append(where, "EXISTS (SELECT 1 FROM observations oi JOIN observers obi ON obi.rowid = oi.observer_idx WHERE oi.transmission_id = t.id AND obi.id IN ("+placeholders+"))")
		} else {
			where = append(where, "EXISTS (SELECT 1 FROM observations oi WHERE oi.transmission_id = t.id AND oi.observer_id = ?)")
			where = append(where, "EXISTS (SELECT 1 FROM observations oi WHERE oi.transmission_id = t.id AND oi.observer_id IN ("+placeholders+"))")
		}
		for _, id := range ids {
			args = append(args, strings.TrimSpace(id))
		}
		args = append(args, q.Observer)
	}
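	// Illustration: for q.Observer = "obs1, obs2" on the v3 schema the block
	// above appends
	//   EXISTS (SELECT 1 FROM observations oi JOIN observers obi
	//           ON obi.rowid = oi.observer_idx
	//           WHERE oi.transmission_id = t.id AND obi.id IN (?,?))
	// with args ["obs1", "obs2"] (each ID trimmed of surrounding spaces).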
	if q.Region != "" {
		if db.isV3 {
@@ -1487,6 +1498,39 @@ func (db *DB) GetNodeLocations() map[string]map[string]interface{} {
	return result
}

// GetNodeLocationsByKeys returns location data only for the given public keys.
// This avoids fetching ALL nodes when only a few keys need to be matched.
func (db *DB) GetNodeLocationsByKeys(keys []string) map[string]map[string]interface{} {
	result := make(map[string]map[string]interface{})
	if len(keys) == 0 {
		return result
	}
	placeholders := make([]string, len(keys))
	args := make([]interface{}, len(keys))
	for i, k := range keys {
		placeholders[i] = "?"
		args[i] = strings.ToLower(k)
	}
	query := "SELECT public_key, lat, lon, role FROM nodes WHERE LOWER(public_key) IN (" + strings.Join(placeholders, ",") + ")"
	rows, err := db.conn.Query(query, args...)
	if err != nil {
		return result
	}
	defer rows.Close()
	for rows.Next() {
		var pk string
		var role sql.NullString
		var lat, lon sql.NullFloat64
		rows.Scan(&pk, &lat, &lon, &role)
		result[strings.ToLower(pk)] = map[string]interface{}{
			"lat":  nullFloat(lat),
			"lon":  nullFloat(lon),
			"role": nullStr(role),
		}
	}
	return result
}
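// Usage sketch (illustrative key): results come back keyed by the
// lowercased public key, so callers should lowercase before lookup:
//
//	locs := db.GetNodeLocationsByKeys([]string{"AABBCCDD11223344"})
//	if loc, ok := locs["aabbccdd11223344"]; ok {
//		_ = loc["lat"] // also "lon" and "role", each possibly nil
//	}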

// QueryMultiNodePackets returns transmissions referencing any of the given pubkeys.
func (db *DB) QueryMultiNodePackets(pubkeys []string, limit, offset int, order, since, until string) (*PacketResult, error) {
	if len(pubkeys) == 0 {
@@ -1690,3 +1734,341 @@ func (db *DB) PruneOldPackets(days int) (int64, error) {
	n, _ := res.RowsAffected()
	return n, tx.Commit()
}

// MetricsSample represents a single row from observer_metrics with computed deltas.
type MetricsSample struct {
	Timestamp     string   `json:"timestamp"`
	NoiseFloor    *float64 `json:"noise_floor"`
	TxAirSecs     *int     `json:"tx_air_secs,omitempty"`
	RxAirSecs     *int     `json:"rx_air_secs,omitempty"`
	RecvErrors    *int     `json:"recv_errors,omitempty"`
	BatteryMv     *int     `json:"battery_mv"`
	PacketsSent   *int     `json:"packets_sent,omitempty"`
	PacketsRecv   *int     `json:"packets_recv,omitempty"`
	TxAirtimePct  *float64 `json:"tx_airtime_pct"`
	RxAirtimePct  *float64 `json:"rx_airtime_pct"`
	RecvErrorRate *float64 `json:"recv_error_rate"`
	IsReboot      bool     `json:"is_reboot_sample,omitempty"`
}

// rawMetricsSample is the raw DB row before delta computation.
type rawMetricsSample struct {
	Timestamp   string
	NoiseFloor  *float64
	TxAirSecs   *int
	RxAirSecs   *int
	RecvErrors  *int
	BatteryMv   *int
	PacketsSent *int
	PacketsRecv *int
}

// GetObserverMetrics returns time-series metrics with server-side delta computation.
// resolution: "5m" (raw), "1h", "1d"
// sampleIntervalSec: expected interval between samples (default 300)
func (db *DB) GetObserverMetrics(observerID, since, until, resolution string, sampleIntervalSec int) ([]MetricsSample, []string, error) {
	if sampleIntervalSec <= 0 {
		sampleIntervalSec = 300
	}

	// Build query based on resolution
	var query string
	args := []interface{}{observerID}

	// Determine the effective bucket size for gap threshold scaling.
	// For raw data (5m), use sampleIntervalSec. For aggregated resolutions,
	// use the bucket duration so consecutive buckets aren't treated as gaps.
	bucketSizeSec := sampleIntervalSec
	switch resolution {
	case "1h":
		bucketSizeSec = 3600
		// Use LAST value per bucket (latest timestamp) instead of MAX to preserve
		// reboot semantics: if a device reboots mid-bucket, the last sample is the
		// post-reboot baseline, not the pre-reboot high-water mark.
		query = `SELECT ts, noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv FROM (
			SELECT
				strftime('%Y-%m-%dT%H:00:00Z', timestamp) as ts,
				noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv,
				ROW_NUMBER() OVER (PARTITION BY observer_id, strftime('%Y-%m-%dT%H:00:00Z', timestamp) ORDER BY timestamp DESC) as rn
			FROM observer_metrics WHERE observer_id = ?`
	case "1d":
		bucketSizeSec = 86400
		query = `SELECT ts, noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv FROM (
			SELECT
				strftime('%Y-%m-%dT00:00:00Z', timestamp) as ts,
				noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv,
				ROW_NUMBER() OVER (PARTITION BY observer_id, strftime('%Y-%m-%dT00:00:00Z', timestamp) ORDER BY timestamp DESC) as rn
			FROM observer_metrics WHERE observer_id = ?`
	default: // "5m" or raw
		query = `SELECT timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv, packets_sent, packets_recv
			FROM observer_metrics WHERE observer_id = ?`
	}

	if since != "" {
		query += " AND timestamp >= ?"
		args = append(args, since)
	}
	if until != "" {
		query += " AND timestamp <= ?"
		args = append(args, until)
	}

	switch resolution {
	case "1h", "1d":
		query += ") WHERE rn = 1 ORDER BY ts ASC"
	default:
		query += " ORDER BY timestamp ASC"
	}

	rows, err := db.conn.Query(query, args...)
	if err != nil {
		return nil, nil, err
	}
	defer rows.Close()

	var raw []rawMetricsSample
	for rows.Next() {
		var s rawMetricsSample
		if err := rows.Scan(&s.Timestamp, &s.NoiseFloor, &s.TxAirSecs, &s.RxAirSecs, &s.RecvErrors, &s.BatteryMv, &s.PacketsSent, &s.PacketsRecv); err != nil {
			return nil, nil, err
		}
		raw = append(raw, s)
	}
	if err := rows.Err(); err != nil {
		return nil, nil, err
	}

	// Compute deltas between consecutive samples.
	// bucketSizeSec determines gap threshold: for raw data it's sampleIntervalSec,
	// for aggregated resolutions it's the bucket duration (3600 for 1h, 86400 for 1d).
	return computeDeltas(raw, bucketSizeSec)
}
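// Usage sketch: hourly buckets for the last 24h of one observer. The 300
// here is the default 5-minute sample interval; it only affects the gap
// threshold when resolution is raw ("5m").
//
//	since := time.Now().UTC().Add(-24 * time.Hour).Format(time.RFC3339)
//	samples, reboots, err := db.GetObserverMetrics("obs1", since, "", "1h", 300)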

// computeDeltas computes per-interval rates from cumulative counters.
// Handles reboots (counter reset) and gaps (missing samples).
// bucketSizeSec is the expected interval between consecutive points
// (sampleInterval for raw data, bucket duration for aggregated resolutions).
func computeDeltas(raw []rawMetricsSample, bucketSizeSec int) ([]MetricsSample, []string, error) {
	if len(raw) == 0 {
		return nil, nil, nil
	}

	gapThreshold := float64(bucketSizeSec) * 2.0
	result := make([]MetricsSample, 0, len(raw))
	var reboots []string

	for i, cur := range raw {
		s := MetricsSample{
			Timestamp:  cur.Timestamp,
			NoiseFloor: cur.NoiseFloor,
			BatteryMv:  cur.BatteryMv,
		}

		if i == 0 {
			// First sample: no delta possible
			result = append(result, s)
			continue
		}

		prev := raw[i-1]

		// Check for gap
		curT, err1 := time.Parse(time.RFC3339, cur.Timestamp)
		prevT, err2 := time.Parse(time.RFC3339, prev.Timestamp)
		if err1 != nil || err2 != nil {
			result = append(result, s)
			continue
		}
		intervalSecs := curT.Sub(prevT).Seconds()
		if intervalSecs > gapThreshold {
			// Gap detected: insert null deltas (don't interpolate)
			result = append(result, s)
			continue
		}
		if intervalSecs <= 0 {
			result = append(result, s)
			continue
		}

		// Detect reboot: any cumulative counter decreased
		isReboot := false
		if cur.TxAirSecs != nil && prev.TxAirSecs != nil && *cur.TxAirSecs < *prev.TxAirSecs {
			isReboot = true
		}
		if cur.RxAirSecs != nil && prev.RxAirSecs != nil && *cur.RxAirSecs < *prev.RxAirSecs {
			isReboot = true
		}
		if cur.RecvErrors != nil && prev.RecvErrors != nil && *cur.RecvErrors < *prev.RecvErrors {
			isReboot = true
		}
		if cur.PacketsSent != nil && prev.PacketsSent != nil && *cur.PacketsSent < *prev.PacketsSent {
			isReboot = true
		}
		if cur.PacketsRecv != nil && prev.PacketsRecv != nil && *cur.PacketsRecv < *prev.PacketsRecv {
			isReboot = true
		}

		if isReboot {
			s.IsReboot = true
			reboots = append(reboots, cur.Timestamp)
			// Skip delta computation for reboot samples — use as new baseline
			result = append(result, s)
			continue
		}

		// Compute TX airtime percentage
		if cur.TxAirSecs != nil && prev.TxAirSecs != nil {
			delta := float64(*cur.TxAirSecs - *prev.TxAirSecs)
			pct := (delta / intervalSecs) * 100.0
			if pct < 0 {
				pct = 0
			}
			if pct > 100 {
				pct = 100
			}
			roundedPct := math.Round(pct*100) / 100
			s.TxAirtimePct = &roundedPct
		}

		// Compute RX airtime percentage
		if cur.RxAirSecs != nil && prev.RxAirSecs != nil {
			delta := float64(*cur.RxAirSecs - *prev.RxAirSecs)
			pct := (delta / intervalSecs) * 100.0
			if pct < 0 {
				pct = 0
			}
			if pct > 100 {
				pct = 100
			}
			roundedPct := math.Round(pct*100) / 100
			s.RxAirtimePct = &roundedPct
		}

		// Compute recv error rate
		if cur.RecvErrors != nil && prev.RecvErrors != nil &&
			cur.PacketsRecv != nil && prev.PacketsRecv != nil {
			deltaErrors := float64(*cur.RecvErrors - *prev.RecvErrors)
			deltaRecv := float64(*cur.PacketsRecv - *prev.PacketsRecv)
			total := deltaRecv + deltaErrors
			if total > 0 {
				rate := (deltaErrors / total) * 100.0
				rate = math.Round(rate*100) / 100
				s.RecvErrorRate = &rate
			}
		}

		result = append(result, s)
	}

	return result, reboots, nil
}
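// Worked example (illustrative numbers): with samples 300s apart and
// tx_air_secs going 100 -> 115, delta = 15 and TxAirtimePct = 15/300*100
// = 5.00. If recv_errors instead drops from 50 to 3, the counter decrease
// marks the sample IsReboot, its timestamp is appended to reboots, and no
// deltas are emitted for that interval (the sample becomes the new baseline).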
|
||||
// MetricsSummaryRow holds summary data for one observer.
|
||||
type MetricsSummaryRow struct {
|
||||
ObserverID string `json:"observer_id"`
|
||||
ObserverName *string `json:"observer_name"`
|
||||
IATA string `json:"iata,omitempty"`
|
||||
CurrentNF *float64 `json:"current_noise_floor"`
|
||||
AvgNF *float64 `json:"avg_noise_floor_24h"`
|
||||
MaxNF *float64 `json:"max_noise_floor_24h"`
|
||||
CurrentBattMv *int `json:"battery_mv"`
|
||||
SampleCount int `json:"sample_count"`
|
||||
Sparkline []*float64 `json:"sparkline"`
|
||||
}
|
||||
|
||||
// GetMetricsSummary returns a fleet summary of observer metrics within a time window.
|
||||
// Uses a CTE with ROW_NUMBER to get latest values in a single pass (no correlated subqueries).
|
||||
// Also returns sparkline data (noise_floor time series) per observer.
|
||||
func (db *DB) GetMetricsSummary(since string) ([]MetricsSummaryRow, error) {
|
||||
query := `
|
||||
WITH ranked AS (
|
||||
SELECT observer_id, noise_floor, battery_mv,
|
||||
ROW_NUMBER() OVER (PARTITION BY observer_id ORDER BY timestamp DESC) as rn
|
||||
FROM observer_metrics
|
||||
WHERE timestamp >= ?
|
||||
)
|
||||
SELECT m.observer_id, o.name, COALESCE(o.iata, '') as iata,
|
||||
r.noise_floor as current_nf,
|
||||
AVG(m.noise_floor) as avg_nf,
|
||||
MAX(m.noise_floor) as max_nf,
|
||||
r.battery_mv as current_batt,
|
||||
COUNT(*) as sample_count
|
||||
FROM observer_metrics m
|
||||
LEFT JOIN observers o ON o.id = m.observer_id
|
||||
LEFT JOIN ranked r ON r.observer_id = m.observer_id AND r.rn = 1
|
||||
WHERE m.timestamp >= ?
|
||||
GROUP BY m.observer_id
|
||||
ORDER BY max_nf DESC
|
||||
`
|
||||
rows, err := db.conn.Query(query, since, since)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var result []MetricsSummaryRow
|
||||
for rows.Next() {
|
||||
var s MetricsSummaryRow
|
||||
if err := rows.Scan(&s.ObserverID, &s.ObserverName, &s.IATA, &s.CurrentNF, &s.AvgNF, &s.MaxNF, &s.CurrentBattMv, &s.SampleCount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, s)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fetch sparkline data (noise_floor series) for all observers in one query
|
||||
if len(result) > 0 {
|
||||
sparkQuery := `SELECT observer_id, noise_floor FROM observer_metrics
|
||||
WHERE timestamp >= ? ORDER BY observer_id, timestamp ASC`
|
||||
sparkRows, err := db.conn.Query(sparkQuery, since)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sparkRows.Close()
|
||||
|
||||
sparkMap := make(map[string][]*float64)
|
||||
for sparkRows.Next() {
|
||||
var oid string
|
||||
var nf *float64
|
||||
if err := sparkRows.Scan(&oid, &nf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sparkMap[oid] = append(sparkMap[oid], nf)
|
||||
}
|
||||
if err := sparkRows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range result {
|
||||
if s, ok := sparkMap[result[i].ObserverID]; ok {
|
||||
result[i].Sparkline = s
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// PruneOldMetrics deletes observer_metrics rows older than retentionDays.
|
||||
func (db *DB) PruneOldMetrics(retentionDays int) (int64, error) {
|
||||
dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=10000", db.path)
|
||||
rw, err := sql.Open("sqlite", dsn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rw.SetMaxOpenConns(1)
|
||||
defer rw.Close()
|
||||
|
||||
cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format(time.RFC3339)
|
||||
res, err := rw.Exec(`DELETE FROM observer_metrics WHERE timestamp < ?`, cutoff)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, _ := res.RowsAffected()
|
||||
if n > 0 {
|
||||
log.Printf("[metrics] Pruned %d observer_metrics rows older than %d days", n, retentionDays)
|
||||
}
|
||||
return n, nil
|
||||
}
@@ -75,6 +75,21 @@ func setupTestDB(t *testing.T) *DB {
			timestamp INTEGER NOT NULL
		);

		CREATE TABLE IF NOT EXISTS observer_metrics (
			observer_id TEXT NOT NULL,
			timestamp TEXT NOT NULL,
			noise_floor REAL,
			tx_air_secs INTEGER,
			rx_air_secs INTEGER,
			recv_errors INTEGER,
			battery_mv INTEGER,
			packets_sent INTEGER,
			packets_recv INTEGER,
			PRIMARY KEY (observer_id, timestamp)
		);

		CREATE INDEX IF NOT EXISTS idx_observer_metrics_timestamp ON observer_metrics(timestamp);
	`
	if _, err := conn.Exec(schema); err != nil {
		t.Fatal(err)
@@ -1537,3 +1552,367 @@ func TestNodeTelemetryFields(t *testing.T) {
func TestMain(m *testing.M) {
	os.Exit(m.Run())
}

func TestGetObserverMetrics(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	now := time.Now().UTC()
	t1 := now.Add(-2 * time.Hour).Format(time.RFC3339)
	t2 := now.Add(-1 * time.Hour).Format(time.RFC3339)
	t3 := now.Format(time.RFC3339)

	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, battery_mv) VALUES (?, ?, ?, ?, ?, ?, ?)",
		"obs1", t1, -112.5, 100, 500, 3, 3720)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors) VALUES (?, ?, ?, ?, ?, ?)",
		"obs1", t2, -110.0, 200, 800, 5)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors) VALUES (?, ?, ?, ?, ?, ?)",
		"obs1", t3, -108.0, 300, 1100, 8)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)",
		"obs2", t1, -115.0)

	// Query all for obs1
	since := now.Add(-3 * time.Hour).Format(time.RFC3339)
	metrics, reboots, err := db.GetObserverMetrics("obs1", since, "", "5m", 3600)
	if err != nil {
		t.Fatal(err)
	}
	if len(metrics) != 3 {
		t.Errorf("expected 3 metrics, got %d", len(metrics))
	}
	if len(reboots) != 0 {
		t.Errorf("expected 0 reboots, got %d", len(reboots))
	}

	// Verify first row has noise_floor
	if metrics[0].NoiseFloor == nil || *metrics[0].NoiseFloor != -112.5 {
		t.Errorf("first noise_floor = %v, want -112.5", metrics[0].NoiseFloor)
	}
	// First row: no delta possible (first sample)
	if metrics[0].TxAirtimePct != nil {
		t.Errorf("first sample should have nil tx_airtime_pct, got %v", *metrics[0].TxAirtimePct)
	}

	// Second row should have computed deltas
	// TX: (200-100) / 3600 * 100 ≈ 2.78%
	if metrics[1].TxAirtimePct == nil {
		t.Errorf("second sample tx_airtime_pct should not be nil")
	} else if *metrics[1].TxAirtimePct < 2.0 || *metrics[1].TxAirtimePct > 3.5 {
		t.Errorf("second sample tx_airtime_pct = %v, want ~2.78", *metrics[1].TxAirtimePct)
	}

	// Query with until filter
	metrics2, _, err := db.GetObserverMetrics("obs1", since, t2, "5m", 3600)
	if err != nil {
		t.Fatal(err)
	}
	if len(metrics2) != 2 {
		t.Errorf("expected 2 metrics with until filter, got %d", len(metrics2))
	}
}

func TestGetMetricsSummary(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	now := time.Now().UTC()
	t1 := now.Add(-2 * time.Hour).Format(time.RFC3339)
	t2 := now.Add(-1 * time.Hour).Format(time.RFC3339)

	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, battery_mv) VALUES (?, ?, ?, ?)",
		"obs1", t1, -112.0, 3720)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)",
		"obs1", t2, -108.0)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)",
		"obs2", t1, -115.0)

	since := now.Add(-24 * time.Hour).Format(time.RFC3339)
	summary, err := db.GetMetricsSummary(since)
	if err != nil {
		t.Fatal(err)
	}
	if len(summary) != 2 {
		t.Fatalf("expected 2 observers in summary, got %d", len(summary))
	}

	// Results sorted by max_nf DESC
	// obs1 has max -108, obs2 has max -115
	if summary[0].ObserverID != "obs1" {
		t.Errorf("first observer should be obs1 (highest max NF), got %s", summary[0].ObserverID)
	}
	if summary[0].CurrentNF == nil || *summary[0].CurrentNF != -108.0 {
		t.Errorf("obs1 current NF = %v, want -108.0", summary[0].CurrentNF)
	}
	if summary[0].SampleCount != 2 {
		t.Errorf("obs1 sample count = %d, want 2", summary[0].SampleCount)
	}
	// Verify sparkline data is included
	if len(summary[0].Sparkline) != 2 {
		t.Errorf("obs1 sparkline length = %d, want 2", len(summary[0].Sparkline))
	}
	if len(summary[1].Sparkline) != 1 {
		t.Errorf("obs2 sparkline length = %d, want 1", len(summary[1].Sparkline))
	}
	// Sparkline should be ordered by timestamp ASC
	if summary[0].Sparkline[0] != nil && *summary[0].Sparkline[0] != -112.0 {
		t.Errorf("obs1 sparkline[0] = %v, want -112.0", *summary[0].Sparkline[0])
	}
	if summary[0].Sparkline[1] != nil && *summary[0].Sparkline[1] != -108.0 {
		t.Errorf("obs1 sparkline[1] = %v, want -108.0", *summary[0].Sparkline[1])
	}
}

func TestObserverMetricsAPIEndpoints(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	now := time.Now().UTC()
	t1 := now.Add(-1 * time.Hour).Format(time.RFC3339)

	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)",
		"obs1", t1, -112.0)

	// Query directly to verify
	metrics, _, err := db.GetObserverMetrics("obs1", "", "", "5m", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(metrics) != 1 {
		t.Errorf("expected 1 metric, got %d", len(metrics))
	}
}

func TestComputeDeltas(t *testing.T) {
	intPtr := func(v int) *int { return &v }
	floatPtr := func(v float64) *float64 { return &v }

	t.Run("empty input", func(t *testing.T) {
		result, reboots, err := computeDeltas(nil, 300)
		if err != nil {
			t.Fatal(err)
		}
		if result != nil {
			t.Errorf("expected nil, got %v", result)
		}
		if reboots != nil {
			t.Errorf("expected nil reboots, got %v", reboots)
		}
	})

	t.Run("normal delta computation", func(t *testing.T) {
		raw := []rawMetricsSample{
			{Timestamp: "2026-04-05T00:00:00Z", NoiseFloor: floatPtr(-112), TxAirSecs: intPtr(100), RxAirSecs: intPtr(500), RecvErrors: intPtr(3), PacketsRecv: intPtr(1000)},
			{Timestamp: "2026-04-05T00:05:00Z", NoiseFloor: floatPtr(-110), TxAirSecs: intPtr(115), RxAirSecs: intPtr(525), RecvErrors: intPtr(5), PacketsRecv: intPtr(1100)},
		}
		result, reboots, err := computeDeltas(raw, 300)
		if err != nil {
			t.Fatal(err)
		}
		if len(result) != 2 {
			t.Fatalf("expected 2 results, got %d", len(result))
		}
		if len(reboots) != 0 {
			t.Errorf("expected 0 reboots, got %d", len(reboots))
		}
		// First sample: no deltas
		if result[0].TxAirtimePct != nil {
			t.Errorf("first sample should have nil tx_airtime_pct")
		}
		// Second sample: TX delta = 15 secs / 300 secs * 100 = 5%
		if result[1].TxAirtimePct == nil {
			t.Fatal("second sample tx_airtime_pct should not be nil")
		}
		if *result[1].TxAirtimePct != 5.0 {
			t.Errorf("tx_airtime_pct = %v, want 5.0", *result[1].TxAirtimePct)
		}
		// RX delta = 25 secs / 300 secs * 100 ≈ 8.33%
		if result[1].RxAirtimePct == nil {
			t.Fatal("second sample rx_airtime_pct should not be nil")
		}
		if *result[1].RxAirtimePct < 8.3 || *result[1].RxAirtimePct > 8.4 {
			t.Errorf("rx_airtime_pct = %v, want ~8.33", *result[1].RxAirtimePct)
		}
		// Error rate: delta_errors=2, delta_recv=100, rate = 2/(100+2)*100 ≈ 1.96%
		if result[1].RecvErrorRate == nil {
			t.Fatal("second sample recv_error_rate should not be nil")
		}
		if *result[1].RecvErrorRate < 1.9 || *result[1].RecvErrorRate > 2.0 {
			t.Errorf("recv_error_rate = %v, want ~1.96", *result[1].RecvErrorRate)
		}
	})

	t.Run("reboot detection", func(t *testing.T) {
		raw := []rawMetricsSample{
			{Timestamp: "2026-04-05T00:00:00Z", TxAirSecs: intPtr(1000), RxAirSecs: intPtr(5000)},
			{Timestamp: "2026-04-05T00:05:00Z", TxAirSecs: intPtr(10), RxAirSecs: intPtr(20)}, // reboot!
			{Timestamp: "2026-04-05T00:10:00Z", TxAirSecs: intPtr(25), RxAirSecs: intPtr(45)},
		}
		result, reboots, err := computeDeltas(raw, 300)
		if err != nil {
			t.Fatal(err)
		}
		if len(reboots) != 1 {
			t.Fatalf("expected 1 reboot, got %d", len(reboots))
		}
		if reboots[0] != "2026-04-05T00:05:00Z" {
			t.Errorf("reboot timestamp = %s", reboots[0])
		}
		if !result[1].IsReboot {
			t.Error("second sample should be marked as reboot")
		}
		// Reboot sample should have nil deltas
		if result[1].TxAirtimePct != nil {
			t.Error("reboot sample should have nil tx_airtime_pct")
		}
		// Third sample should have valid deltas from post-reboot baseline
		if result[2].TxAirtimePct == nil {
			t.Fatal("third sample tx_airtime_pct should not be nil")
		}
		if *result[2].TxAirtimePct != 5.0 { // 15/300*100
			t.Errorf("third sample tx_airtime_pct = %v, want 5.0", *result[2].TxAirtimePct)
		}
	})

	t.Run("gap detection", func(t *testing.T) {
		raw := []rawMetricsSample{
			{Timestamp: "2026-04-05T00:00:00Z", TxAirSecs: intPtr(100)},
			{Timestamp: "2026-04-05T00:15:00Z", TxAirSecs: intPtr(200)}, // 15min gap > 2*300s
		}
		result, _, err := computeDeltas(raw, 300)
		if err != nil {
			t.Fatal(err)
		}
		// Gap sample should have nil deltas
		if result[1].TxAirtimePct != nil {
			t.Error("gap sample should have nil tx_airtime_pct")
		}
	})
}
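computeDeltas itself is not part of this diff, only its tests are, so for orientation here is a minimal sketch consistent with the behavior the subtests above pin down. The result type name MetricsSample and the simplified error-free signature are assumptions; the field names come from the tests.

// Sketch only, not the shipped implementation. Rules encoded by the tests:
// the first sample carries no deltas; a counter that goes backwards marks a
// reboot (deltas nil, IsReboot set, timestamp reported); samples farther
// apart than 2x the effective interval also get nil deltas.
func computeDeltasSketch(raw []rawMetricsSample, intervalSecs int) (results []MetricsSample, reboots []string) {
	for i, s := range raw {
		out := MetricsSample{Timestamp: s.Timestamp, NoiseFloor: s.NoiseFloor}
		if i > 0 && s.TxAirSecs != nil && raw[i-1].TxAirSecs != nil {
			prev := raw[i-1]
			cur, _ := time.Parse(time.RFC3339, s.Timestamp)
			before, _ := time.Parse(time.RFC3339, prev.Timestamp)
			switch {
			case *s.TxAirSecs < *prev.TxAirSecs:
				// Counter reset: the device rebooted between samples.
				out.IsReboot = true
				reboots = append(reboots, s.Timestamp)
			case cur.Sub(before) > 2*time.Duration(intervalSecs)*time.Second:
				// Gap too wide: leave deltas nil rather than extrapolate.
			default:
				pct := float64(*s.TxAirSecs-*prev.TxAirSecs) / float64(intervalSecs) * 100
				out.TxAirtimePct = &pct // RX and error-rate deltas follow the same pattern
			}
		}
		results = append(results, out)
	}
	return results, reboots
}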

func TestGetObserverMetricsResolution(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs) VALUES (?, ?, ?, ?)",
		"obs1", "2026-04-05T00:00:00Z", -112.0, 100)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs) VALUES (?, ?, ?, ?)",
		"obs1", "2026-04-05T00:05:00Z", -110.0, 200)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs) VALUES (?, ?, ?, ?)",
		"obs1", "2026-04-05T01:00:00Z", -108.0, 500)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs) VALUES (?, ?, ?, ?)",
		"obs1", "2026-04-05T01:05:00Z", -106.0, 600)

	// 5m resolution: all 4 rows
	m5, _, err := db.GetObserverMetrics("obs1", "2026-04-04T00:00:00Z", "", "5m", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(m5) != 4 {
		t.Errorf("5m resolution: expected 4 rows, got %d", len(m5))
	}

	// 1h resolution: 2 buckets
	m1h, _, err := db.GetObserverMetrics("obs1", "2026-04-04T00:00:00Z", "", "1h", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(m1h) != 2 {
		t.Errorf("1h resolution: expected 2 rows, got %d", len(m1h))
	}

	// 1d resolution: 1 bucket
	m1d, _, err := db.GetObserverMetrics("obs1", "2026-04-04T00:00:00Z", "", "1d", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(m1d) != 1 {
		t.Errorf("1d resolution: expected 1 row, got %d", len(m1d))
	}
}

func TestHourlyResolutionDeltasNotNull(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	// Two hourly buckets, each with one sample. With old MAX+hardcoded gap threshold,
	// the 3600s gap would exceed sampleInterval*2 (600s) and deltas would be null.
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_hr", "2026-04-05T10:00:00Z", -110.0, 100, 200, 5, 50, 100)
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_hr", "2026-04-05T11:00:00Z", -108.0, 200, 400, 10, 80, 200)

	m, _, err := db.GetObserverMetrics("obs_hr", "2026-04-04T00:00:00Z", "", "1h", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(m) != 2 {
		t.Fatalf("expected 2 rows, got %d", len(m))
	}
	// Second row should have computed deltas (not null)
	if m[1].TxAirtimePct == nil {
		t.Error("1h resolution: tx_airtime_pct should not be nil — gap threshold must scale with resolution")
	}
}

func TestLastValuePreservesReboot(t *testing.T) {
	db := setupTestDB(t)
	seedTestData(t, db)

	// Hour bucket with two samples: pre-reboot (high) and post-reboot (low).
	// With MAX(), the pre-reboot value wins and the reboot is hidden.
	// With LAST (latest timestamp), the post-reboot value wins.
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_rb", "2026-04-05T10:00:00Z", -110.0, 1000, 2000, 500, 400, 800) // pre-reboot baseline
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_rb", "2026-04-05T10:20:00Z", -110.0, 5000, 6000, 900, 700, 1200) // pre-reboot peak
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_rb", "2026-04-05T10:40:00Z", -110.0, 10, 20, 1, 5, 10) // post-reboot (counter reset)

	// Next hour bucket
	db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor, tx_air_secs, rx_air_secs, recv_errors, packets_sent, packets_recv) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		"obs_rb", "2026-04-05T11:00:00Z", -108.0, 100, 120, 5, 20, 50)

	m, reboots, err := db.GetObserverMetrics("obs_rb", "2026-04-04T00:00:00Z", "", "1h", 300)
	if err != nil {
		t.Fatal(err)
	}
	if len(m) != 2 {
		t.Fatalf("expected 2 rows, got %d", len(m))
	}

	// First bucket should use the LAST value (post-reboot: tx_air_secs=10).
	// Second bucket (tx_air_secs=100) is a normal increase from 10→100.
	// With LAST-value semantics, the second bucket should have valid deltas (not a reboot).
	// With MAX(), first bucket would have tx_air_secs=5000, and second=100 would
	// trigger a false reboot detection.
	if m[1].IsReboot {
		t.Error("second bucket should NOT be flagged as reboot with LAST-value aggregation")
	}
	if m[1].TxAirtimePct == nil {
		t.Error("second bucket should have non-nil tx_airtime_pct")
	}
	_ = reboots // reboots list is informational
}
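The LAST-value bucketing this test demands lines up with the ranked-CTE pattern already visible at the top of this diff (r.rn = 1). A sketch of how hourly buckets can take the latest sample per bucket in SQLite follows; table and column names come from the test schema, but the exact production query may differ.

// Hypothetical query sketch (window functions need SQLite >= 3.25,
// which modernc.org/sqlite provides).
const lastPerBucketSketch = `
	WITH ranked AS (
		SELECT observer_id,
		       strftime('%Y-%m-%dT%H:00:00Z', timestamp) AS bucket,
		       tx_air_secs,
		       ROW_NUMBER() OVER (
		           PARTITION BY observer_id, strftime('%Y-%m-%dT%H:00:00Z', timestamp)
		           ORDER BY timestamp DESC
		       ) AS rn
		FROM observer_metrics
	)
	SELECT observer_id, bucket, tx_air_secs
	FROM ranked
	WHERE rn = 1 -- latest sample per bucket, not MAX(tx_air_secs)
	ORDER BY bucket ASC`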

func TestParseWindowDuration(t *testing.T) {
	tests := []struct {
		input string
		want  time.Duration
		err   bool
	}{
		{"1h", time.Hour, false},
		{"24h", 24 * time.Hour, false},
		{"3d", 3 * 24 * time.Hour, false},
		{"30d", 30 * 24 * time.Hour, false},
		{"invalid", 0, true},
	}
	for _, tc := range tests {
		got, err := parseWindowDuration(tc.input)
		if tc.err && err == nil {
			t.Errorf("parseWindowDuration(%q) expected error", tc.input)
		}
		if !tc.err && got != tc.want {
			t.Errorf("parseWindowDuration(%q) = %v, want %v", tc.input, got, tc.want)
		}
	}
}
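parseWindowDuration is exercised but not shown in this diff; a self-contained sketch that satisfies the table above follows. The "d" suffix needs manual handling because time.ParseDuration has no days unit.

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// Sketch only; behavior matches the test table above, the shipped
// implementation may differ.
func parseWindowDurationSketch(s string) (time.Duration, error) {
	if strings.HasSuffix(s, "d") {
		days, err := strconv.Atoi(strings.TrimSuffix(s, "d"))
		if err != nil || days <= 0 {
			return 0, fmt.Errorf("invalid window %q", s)
		}
		return time.Duration(days) * 24 * time.Hour, nil
	}
	return time.ParseDuration(s) // handles "1h", "24h"; rejects garbage
}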

@@ -162,24 +162,50 @@ func TestEvictStale_NoEvictionWhenDisabled(t *testing.T) {

func TestEvictStale_MemoryBasedEviction(t *testing.T) {
	now := time.Now().UTC()
	// Create enough packets to exceed a small memory limit
	// 1000 packets * 5KB + 2000 obs * 500B ≈ 6MB
	store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
	// All packets are recent (1h old) so time-based won't trigger
	// All packets are recent (1h old) so time-based won't trigger.
	store.retentionHours = 24
	store.maxMemoryMB = 3 // ~3MB limit, should evict roughly half
	store.maxMemoryMB = 3
	// Inject deterministic estimator: simulates 6MB (over 3MB limit).
	// Uses packet count so it scales correctly after eviction.
	store.memoryEstimator = func() float64 {
		return float64(len(store.packets)*5120+store.totalObs*500) / 1048576.0
	}

	evicted := store.EvictStale()
	if evicted == 0 {
		t.Fatal("expected some evictions for memory cap")
	}
	// After eviction, estimated memory should be <= 3MB
	estMB := store.estimatedMemoryMB()
	if estMB > 3.5 { // small tolerance
	if estMB > 3.5 {
		t.Fatalf("expected <=3.5MB after eviction, got %.1fMB", estMB)
	}
}

// TestEvictStale_MemoryBasedEviction_UnderestimatedHeap verifies that eviction
// fires correctly when actual heap is much larger than a formula-based estimate
// would report — the scenario that caused OOM kills in production.
func TestEvictStale_MemoryBasedEviction_UnderestimatedHeap(t *testing.T) {
	now := time.Now().UTC()
	store := makeTestStore(1000, now.Add(-1*time.Hour), 0)
	store.retentionHours = 24
	store.maxMemoryMB = 500
	// Simulate actual heap 5x over budget (like production: ~5GB actual vs ~1GB limit).
	store.memoryEstimator = func() float64 {
		return 2500.0 // 2500MB actual vs 500MB limit
	}

	evicted := store.EvictStale()
	if evicted == 0 {
		t.Fatal("expected evictions when heap is 5x over limit")
	}
	// Should keep roughly 500/2500 * 0.9 = 18% of packets → ~180 of 1000.
	remaining := len(store.packets)
	if remaining > 250 {
		t.Fatalf("expected most packets evicted (heap 5x over), but %d of 1000 remain", remaining)
	}
}
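Both tests inject memoryEstimator hooks; the production default is not shown in this diff. Under the assumption implied by the second test's doc comment, namely that a formula-based estimate underestimated the real heap, a heap-backed default would look roughly like this sketch:

import "runtime"

// Sketch of a heap-backed estimator (an assumption, not taken from this
// diff). Reading live heap stats avoids the formula-underestimate failure
// mode described in the test above.
func heapEstimatorMB() float64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms) // cheap enough at eviction cadence
	return float64(ms.HeapAlloc) / (1024 * 1024)
}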

func TestEvictStale_CleansNodeIndexes(t *testing.T) {
	now := time.Now().UTC()
	store := makeTestStore(10, now.Add(-48*time.Hour), 0)

@@ -2,6 +2,8 @@ package main

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"os"
@@ -220,6 +222,44 @@ func TestSortedCopy(t *testing.T) {
	}
}

func TestSortedCopyLarge(t *testing.T) {
	// Regression: verify correct sort on larger input
	rng := rand.New(rand.NewSource(42))
	n := 1000
	input := make([]float64, n)
	for i := range input {
		input[i] = rng.Float64() * 1000
	}
	snapshot := make([]float64, n)
	copy(snapshot, input)
	result := sortedCopy(input)
	if len(result) != n {
		t.Fatalf("expected %d elements, got %d", n, len(result))
	}
	for i := 1; i < len(result); i++ {
		if result[i] < result[i-1] {
			t.Fatalf("not sorted at index %d: %v > %v", i, result[i-1], result[i])
		}
	}
	// Original unchanged: compare against the pre-sort snapshot.
	for i := range input {
		if input[i] != snapshot[i] {
			t.Fatalf("sortedCopy mutated its input at index %d", i)
		}
	}
}

func BenchmarkSortedCopy(b *testing.B) {
	rng := rand.New(rand.NewSource(42))
	for _, size := range []int{256, 1000, 10000} {
		data := make([]float64, size)
		for i := range data {
			data[i] = rng.Float64() * 1000
		}
		b.Run(fmt.Sprintf("n=%d", size), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sortedCopy(data)
			}
		})
	}
}

func TestLastN(t *testing.T) {
	arr := []map[string]interface{}{
		{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5},

@@ -144,6 +144,50 @@ func main() {
		log.Fatalf("[store] failed to load: %v", err)
	}

	// Initialize persisted neighbor graph
	dbPath = database.path
	if err := ensureNeighborEdgesTable(dbPath); err != nil {
		log.Printf("[neighbor] warning: could not create neighbor_edges table: %v", err)
	}
	// Add resolved_path column if missing.
	// NOTE on startup ordering (review item #10): ensureResolvedPathColumn runs AFTER
	// OpenDB/detectSchema, so db.hasResolvedPath will be false on first run with a
	// pre-existing DB. This means Load() won't SELECT resolved_path from SQLite.
	// That's OK: backfillResolvedPaths (below) computes and persists them in-memory
	// AND to SQLite. On next restart, detectSchema finds the column and Load() reads it.
	if err := ensureResolvedPathColumn(dbPath); err != nil {
		log.Printf("[store] warning: could not add resolved_path column: %v", err)
	} else {
		database.hasResolvedPath = true // detectSchema ran before column was added; fix the flag
	}

	// Load or build neighbor graph
	if neighborEdgesTableExists(database.conn) {
		store.graph = loadNeighborEdgesFromDB(database.conn)
		log.Printf("[neighbor] loaded persisted neighbor graph")
	} else {
		log.Printf("[neighbor] no persisted edges found, building from store...")
		rw, rwErr := openRW(dbPath)
		if rwErr == nil {
			edgeCount := buildAndPersistEdges(store, rw)
			rw.Close()
			log.Printf("[neighbor] persisted %d edges", edgeCount)
		}
		store.graph = BuildFromStore(store)
	}

	// Backfill resolved_path for observations that don't have it yet
	if backfilled := backfillResolvedPaths(store, dbPath); backfilled > 0 {
		log.Printf("[store] backfilled resolved_path for %d observations", backfilled)
	}

	// Re-pick best observation now that resolved paths are populated
	store.mu.Lock()
	for _, tx := range store.packets {
		pickBestObservation(tx)
	}
	store.mu.Unlock()

	// WebSocket hub
	hub := NewHub()

@@ -180,8 +224,15 @@ func main() {
	defer stopEviction()

	// Auto-prune old packets if retention.packetDays is configured
	var stopPrune func()
	if cfg.Retention != nil && cfg.Retention.PacketDays > 0 {
		days := cfg.Retention.PacketDays
		pruneTicker := time.NewTicker(24 * time.Hour)
		pruneDone := make(chan struct{})
		stopPrune = func() {
			pruneTicker.Stop()
			close(pruneDone)
		}
		go func() {
			time.Sleep(1 * time.Minute)
			if n, err := database.PruneOldPackets(days); err != nil {
@@ -189,17 +240,47 @@ func main() {
			} else {
				log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
			}
			for range time.Tick(24 * time.Hour) {
				if n, err := database.PruneOldPackets(days); err != nil {
					log.Printf("[prune] error: %v", err)
				} else {
					log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
			for {
				select {
				case <-pruneTicker.C:
					if n, err := database.PruneOldPackets(days); err != nil {
						log.Printf("[prune] error: %v", err)
					} else {
						log.Printf("[prune] deleted %d transmissions older than %d days", n, days)
					}
				case <-pruneDone:
					return
				}
			}
		}()
		log.Printf("[prune] auto-prune enabled: packets older than %d days will be removed daily", days)
	}

	// Auto-prune old metrics
	var stopMetricsPrune func()
	{
		metricsDays := cfg.MetricsRetentionDays()
		metricsPruneTicker := time.NewTicker(24 * time.Hour)
		metricsPruneDone := make(chan struct{})
		stopMetricsPrune = func() {
			metricsPruneTicker.Stop()
			close(metricsPruneDone)
		}
		go func() {
			time.Sleep(2 * time.Minute) // stagger after packet prune
			database.PruneOldMetrics(metricsDays)
			for {
				select {
				case <-metricsPruneTicker.C:
					database.PruneOldMetrics(metricsDays)
				case <-metricsPruneDone:
					return
				}
			}
		}()
		log.Printf("[metrics-prune] auto-prune enabled: metrics older than %d days", metricsDays)
	}

	// Graceful shutdown
	httpServer := &http.Server{
		Addr: fmt.Sprintf(":%d", cfg.Port),
@@ -218,6 +299,14 @@ func main() {
	// 1. Stop accepting new WebSocket/poll data
	poller.Stop()

	// 1b. Stop auto-prune ticker
	if stopPrune != nil {
		stopPrune()
	}
	if stopMetricsPrune != nil {
		stopMetricsPrune()
	}

	// 2. Gracefully drain HTTP connections (up to 15s)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

@@ -18,7 +18,7 @@ const (
	// Time-decay half-life: 7 days.
	affinityHalfLifeHours = 168.0
	// Cache TTL for the built graph.
	neighborGraphTTL = 60 * time.Second
	neighborGraphTTL = 5 * time.Minute
	// Auto-resolve confidence: best must be >= this factor × second-best.
	affinityConfidenceRatio = 3.0
	// Minimum observation count to auto-resolve.
@@ -130,6 +130,17 @@ func BuildFromStore(store *PacketStore) *NeighborGraph {
	return BuildFromStoreWithLog(store, false)
}

// cachedToLower returns strings.ToLower(s), caching results to avoid
// repeated allocations for the same pubkey string.
func cachedToLower(cache map[string]string, s string) string {
	if v, ok := cache[s]; ok {
		return v
	}
	v := strings.ToLower(s)
	cache[s] = v
	return v
}

// BuildFromStoreWithLog constructs the neighbor graph, optionally logging disambiguation decisions.
func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {
	g := NewNeighborGraph()
@@ -149,30 +160,27 @@ func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {
	// Use cached nodes+PM (avoids DB call if cache is fresh).
	_, pm := store.getCachedNodesAndPM()

	// Local cache for strings.ToLower — pubkeys are immutable and repeat
	// across hundreds of thousands of observations.
	lowerCache := make(map[string]string, 256)

	// Phase 1: Extract edges from every transmission + observation.
	for _, tx := range packets {
		isAdvert := tx.PayloadType != nil && *tx.PayloadType == 4
		fromNode := "" // originator pubkey (from byNode index key)
		// Find the originator pubkey — it's the key in store.byNode.
		// StoreTx doesn't store from_node directly; we find it via decoded JSON
		// or the byNode index. However, iterating byNode is expensive.
		// The originator pubkey is in the decoded JSON "from_node" field,
		// but parsing JSON per tx is expensive too.
		// Actually, let's look at how byNode is keyed.
		// Looking at store.go, byNode maps pubkey → transmissions where that
		// pubkey is the "from" node. We need the reverse: tx → from_node.
		// The from_node is embedded in DecodedJSON.
		// For efficiency, let's extract it once.
		fromNode = extractFromNode(tx)
		fromNode := extractFromNode(tx)
		// Pre-compute lowered originator once per tx (not per observation).
		fromLower := ""
		if fromNode != "" {
			fromLower = cachedToLower(lowerCache, fromNode)
		}

		for _, obs := range tx.Observations {
			path := parsePathJSON(obs.PathJSON)
			observerPK := strings.ToLower(obs.ObserverID)
			observerPK := cachedToLower(lowerCache, obs.ObserverID)

			if len(path) == 0 {
				// Zero-hop
				if isAdvert && fromNode != "" {
					fromLower := strings.ToLower(fromNode)
				if isAdvert && fromLower != "" {
					if fromLower != observerPK { // self-edge guard
						g.upsertEdge(fromLower, observerPK, "", observerPK, obs.SNR, parseTimestamp(obs.Timestamp))
					}
@@ -181,20 +189,19 @@ func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {
			}

			// Edge 1: originator ↔ path[0] — ADVERTs only
			if isAdvert && fromNode != "" {
				firstHop := strings.ToLower(path[0])
				fromLower := strings.ToLower(fromNode)
			if isAdvert && fromLower != "" {
				firstHop := cachedToLower(lowerCache, path[0])
				if fromLower != firstHop { // self-edge guard (shouldn't happen but spec says check)
					candidates := pm.m[firstHop]
					g.upsertEdgeWithCandidates(fromLower, firstHop, candidates, observerPK, obs.SNR, parseTimestamp(obs.Timestamp))
					g.upsertEdgeWithCandidates(fromLower, firstHop, candidates, observerPK, obs.SNR, parseTimestamp(obs.Timestamp), lowerCache)
				}
			}

			// Edge 2: observer ↔ path[last] — ALL packet types
			lastHop := strings.ToLower(path[len(path)-1])
			lastHop := cachedToLower(lowerCache, path[len(path)-1])
			if observerPK != lastHop { // self-edge guard
				candidates := pm.m[lastHop]
				g.upsertEdgeWithCandidates(observerPK, lastHop, candidates, observerPK, obs.SNR, parseTimestamp(obs.Timestamp))
				g.upsertEdgeWithCandidates(observerPK, lastHop, candidates, observerPK, obs.SNR, parseTimestamp(obs.Timestamp), lowerCache)
			}
		}
	}
@@ -211,12 +218,10 @@ func BuildFromStoreWithLog(store *PacketStore, enableLog bool) *NeighborGraph {

// extractFromNode pulls the originator pubkey from a StoreTx's DecodedJSON.
// ADVERTs use "pubKey", other packets may use "from_node" or "from".
// Uses the cached ParsedDecoded() accessor to avoid repeated json.Unmarshal.
func extractFromNode(tx *StoreTx) string {
	if tx.DecodedJSON == "" {
		return ""
	}
	var decoded map[string]interface{}
	if err := jsonUnmarshalFast(tx.DecodedJSON, &decoded); err != nil {
	decoded := tx.ParsedDecoded()
	if decoded == nil {
		return ""
	}
	// ADVERTs store the originator pubkey as "pubKey"; other packets may use
@@ -275,9 +280,9 @@ func (g *NeighborGraph) upsertEdge(pubkeyA, pubkeyB, prefix, observer string, sn
}

// upsertEdgeWithCandidates handles prefix-based edges that may be ambiguous.
func (g *NeighborGraph) upsertEdgeWithCandidates(knownPK, prefix string, candidates []nodeInfo, observer string, snr *float64, ts time.Time) {
func (g *NeighborGraph) upsertEdgeWithCandidates(knownPK, prefix string, candidates []nodeInfo, observer string, snr *float64, ts time.Time, lc map[string]string) {
	if len(candidates) == 1 {
		resolved := strings.ToLower(candidates[0].PublicKey)
		resolved := cachedToLower(lc, candidates[0].PublicKey)
		if resolved == knownPK {
			return // self-edge guard
		}
@@ -288,7 +293,7 @@ func (g *NeighborGraph) upsertEdgeWithCandidates(knownPK, prefix string, candida
	// Filter out self from candidates
	filtered := make([]string, 0, len(candidates))
	for _, c := range candidates {
		pk := strings.ToLower(c.PublicKey)
		pk := cachedToLower(lc, c.PublicKey)
		if pk != knownPK {
			filtered = append(filtered, pk)
		}

|
||||
t.Error("old graph should be stale")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeighborGraph_TTLIsReasonable(t *testing.T) {
|
||||
// TTL must be long enough to avoid rebuild storms on busy meshes,
|
||||
// but short enough to reflect topology changes within minutes.
|
||||
if neighborGraphTTL < 1*time.Minute {
|
||||
t.Errorf("neighborGraphTTL too short (%v), will cause rebuild storms", neighborGraphTTL)
|
||||
}
|
||||
if neighborGraphTTL > 10*time.Minute {
|
||||
t.Errorf("neighborGraphTTL too long (%v), topology changes will be stale", neighborGraphTTL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedToLower(t *testing.T) {
|
||||
cache := make(map[string]string)
|
||||
// Basic lowercasing
|
||||
if got := cachedToLower(cache, "AABB"); got != "aabb" {
|
||||
t.Errorf("expected 'aabb', got %q", got)
|
||||
}
|
||||
// Verify it was cached
|
||||
if _, ok := cache["AABB"]; !ok {
|
||||
t.Error("expected 'AABB' to be in cache")
|
||||
}
|
||||
// Same input returns cached result
|
||||
if got := cachedToLower(cache, "AABB"); got != "aabb" {
|
||||
t.Errorf("expected cached 'aabb', got %q", got)
|
||||
}
|
||||
// Already lowercase stays the same
|
||||
if got := cachedToLower(cache, "aabb"); got != "aabb" {
|
||||
t.Errorf("expected 'aabb', got %q", got)
|
||||
}
|
||||
// Empty string
|
||||
if got := cachedToLower(cache, ""); got != "" {
|
||||
t.Errorf("expected empty, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsedDecoded_Caching(t *testing.T) {
|
||||
tx := &StoreTx{DecodedJSON: `{"pubKey":"abc123","name":"test"}`}
|
||||
// First call parses
|
||||
d1 := tx.ParsedDecoded()
|
||||
if d1 == nil {
|
||||
t.Fatal("expected non-nil parsed result")
|
||||
}
|
||||
if d1["pubKey"] != "abc123" {
|
||||
t.Errorf("expected pubKey=abc123, got %v", d1["pubKey"])
|
||||
}
|
||||
// Second call must return the exact same map (pointer equality proves caching)
|
||||
d2 := tx.ParsedDecoded()
|
||||
if &d1 == nil || &d2 == nil {
|
||||
t.Fatal("unexpected nil")
|
||||
}
|
||||
// Mutate d1 and verify d2 sees the mutation — proves same underlying map
|
||||
d1["_sentinel"] = true
|
||||
if d2["_sentinel"] != true {
|
||||
t.Error("expected same map instance from second call (caching broken)")
|
||||
}
|
||||
delete(d1, "_sentinel") // clean up
|
||||
}
|
||||
|
||||
func TestParsedDecoded_EmptyJSON(t *testing.T) {
|
||||
tx := &StoreTx{DecodedJSON: ""}
|
||||
d := tx.ParsedDecoded()
|
||||
if d != nil {
|
||||
t.Errorf("expected nil for empty DecodedJSON, got %v", d)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsedDecoded_InvalidJSON(t *testing.T) {
|
||||
tx := &StoreTx{DecodedJSON: "not json"}
|
||||
d := tx.ParsedDecoded()
|
||||
if d != nil {
|
||||
t.Errorf("expected nil for invalid JSON, got %v", d)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractFromNode_UsesCachedParse(t *testing.T) {
|
||||
tx := &StoreTx{DecodedJSON: `{"pubKey":"aabb1122"}`}
|
||||
// First call to extractFromNode should use ParsedDecoded
|
||||
from := extractFromNode(tx)
|
||||
if from != "aabb1122" {
|
||||
t.Errorf("expected aabb1122, got %q", from)
|
||||
}
|
||||
// ParsedDecoded should now be cached
|
||||
d := tx.ParsedDecoded()
|
||||
if d == nil || d["pubKey"] != "aabb1122" {
|
||||
t.Error("expected ParsedDecoded to return cached result")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBuildFromStore(b *testing.B) {
|
||||
// Simulate a dataset with many packets and repeated pubkeys
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "aaaa1111", Name: "NodeA"},
|
||||
{PublicKey: "bbbb2222", Name: "NodeB"},
|
||||
{PublicKey: "cccc3333", Name: "NodeC"},
|
||||
{PublicKey: "dddd4444", Name: "NodeD"},
|
||||
}
|
||||
const numPackets = 1000
|
||||
packets := make([]*StoreTx, 0, numPackets)
|
||||
for i := 0; i < numPackets; i++ {
|
||||
pt := 4 // ADVERT
|
||||
packets = append(packets, &StoreTx{
|
||||
ID: i,
|
||||
PayloadType: &pt,
|
||||
DecodedJSON: `{"pubKey":"aaaa1111"}`,
|
||||
Observations: []*StoreObs{
|
||||
{ObserverID: "bbbb2222", PathJSON: `["cccc"]`, Timestamp: nowStr, SNR: ngFloatPtr(-5.0)},
|
||||
},
|
||||
})
|
||||
}
|
||||
store := ngTestStore(nodes, packets)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
BuildFromStore(store)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,531 @@
package main

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"time"
)

// persistSem limits concurrent async persistence goroutines to 1.
// Without this, each ingest cycle spawns a goroutine that opens a new
// SQLite RW connection; under sustained load goroutines pile up with
// no backpressure, causing contention and busy-timeout cascades.
var persistSem = make(chan struct{}, 1)

// ─── neighbor_edges table ──────────────────────────────────────────────────────

// ensureNeighborEdgesTable creates the neighbor_edges table if it doesn't exist.
// Uses a separate read-write connection since the main DB is read-only.
func ensureNeighborEdgesTable(dbPath string) error {
	rw, err := openRW(dbPath)
	if err != nil {
		return fmt.Errorf("open rw for neighbor_edges: %w", err)
	}
	defer rw.Close()

	_, err = rw.Exec(`CREATE TABLE IF NOT EXISTS neighbor_edges (
		node_a TEXT NOT NULL,
		node_b TEXT NOT NULL,
		count INTEGER DEFAULT 1,
		last_seen TEXT,
		PRIMARY KEY (node_a, node_b)
	)`)
	return err
}

// loadNeighborEdgesFromDB loads all edges from the neighbor_edges table
// and builds an in-memory NeighborGraph.
func loadNeighborEdgesFromDB(conn *sql.DB) *NeighborGraph {
	g := NewNeighborGraph()

	rows, err := conn.Query("SELECT node_a, node_b, count, last_seen FROM neighbor_edges")
	if err != nil {
		log.Printf("[neighbor] failed to load neighbor_edges: %v", err)
		return g
	}
	defer rows.Close()

	count := 0
	for rows.Next() {
		var a, b string
		var cnt int
		var lastSeen sql.NullString
		if err := rows.Scan(&a, &b, &cnt, &lastSeen); err != nil {
			continue
		}
		ts := time.Time{}
		if lastSeen.Valid {
			ts = parseTimestamp(lastSeen.String)
		}
		// Build edge directly (both nodes are full pubkeys from persisted data)
		key := makeEdgeKey(a, b)
		g.mu.Lock()
		e, exists := g.edges[key]
		if !exists {
			e = &NeighborEdge{
				NodeA:     key.A,
				NodeB:     key.B,
				Observers: make(map[string]bool),
				FirstSeen: ts,
				LastSeen:  ts,
				Count:     cnt,
			}
			g.edges[key] = e
			g.byNode[key.A] = append(g.byNode[key.A], e)
			g.byNode[key.B] = append(g.byNode[key.B], e)
		} else {
			e.Count += cnt
			if ts.After(e.LastSeen) {
				e.LastSeen = ts
			}
		}
		g.mu.Unlock()
		count++
	}

	if count > 0 {
		g.mu.Lock()
		g.builtAt = time.Now()
		g.mu.Unlock()
		log.Printf("[neighbor] loaded %d edges from neighbor_edges table", count)
	}

	return g
}

// ─── shared async persistence helper ───────────────────────────────────────────

// persistObsUpdate holds data for a resolved_path SQLite update.
type persistObsUpdate struct {
	obsID        int
	resolvedPath string
}

// persistEdgeUpdate holds data for a neighbor_edges SQLite upsert.
type persistEdgeUpdate struct {
	a, b, ts string
}

// asyncPersistResolvedPathsAndEdges writes resolved_path updates and neighbor
// edge upserts to SQLite in a background goroutine. Shared between
// IngestNewFromDB and IngestNewObservations to avoid DRY violation.
func asyncPersistResolvedPathsAndEdges(dbPath string, obsUpdates []persistObsUpdate, edgeUpdates []persistEdgeUpdate, logPrefix string) {
	if len(obsUpdates) == 0 && len(edgeUpdates) == 0 {
		return
	}
	// Try-acquire semaphore BEFORE spawning goroutine. If another
	// persistence operation is already running, drop this batch —
	// data lives in memory and will be backfilled on restart.
	select {
	case persistSem <- struct{}{}:
		// Acquired — spawn goroutine to do the work.
	default:
		log.Printf("[store] %s skipped: persistence already in progress", logPrefix)
		return
	}
	go func() {
		defer func() { <-persistSem }()

		rw, err := openRW(dbPath)
		if err != nil {
			log.Printf("[store] %s rw open error: %v", logPrefix, err)
			return
		}
		defer rw.Close()

		if len(obsUpdates) > 0 {
			sqlTx, err := rw.Begin()
			if err == nil {
				stmt, err := sqlTx.Prepare("UPDATE observations SET resolved_path = ? WHERE id = ?")
				if err == nil {
					var firstErr error
					for _, u := range obsUpdates {
						if _, err := stmt.Exec(u.resolvedPath, u.obsID); err != nil && firstErr == nil {
							firstErr = err
						}
					}
					stmt.Close()
					if firstErr != nil {
						log.Printf("[store] %s resolved_path error (first): %v", logPrefix, firstErr)
					}
				} else {
					log.Printf("[store] %s resolved_path prepare error: %v", logPrefix, err)
				}
				sqlTx.Commit()
			}
		}

		if len(edgeUpdates) > 0 {
			sqlTx, err := rw.Begin()
			if err == nil {
				stmt, err := sqlTx.Prepare(`INSERT INTO neighbor_edges (node_a, node_b, count, last_seen)
					VALUES (?, ?, 1, ?)
					ON CONFLICT(node_a, node_b) DO UPDATE SET
						count = count + 1, last_seen = MAX(last_seen, excluded.last_seen)`)
				if err == nil {
					var firstErr error
					for _, e := range edgeUpdates {
						if _, err := stmt.Exec(e.a, e.b, e.ts); err != nil && firstErr == nil {
							firstErr = err
						}
					}
					stmt.Close()
					if firstErr != nil {
						log.Printf("[store] %s edge error (first): %v", logPrefix, firstErr)
					}
				} else {
					log.Printf("[store] %s edge prepare error: %v", logPrefix, err)
				}
				sqlTx.Commit()
			}
		}
	}()
}

// neighborEdgesTableExists checks if the neighbor_edges table has any data.
func neighborEdgesTableExists(conn *sql.DB) bool {
	var cnt int
	err := conn.QueryRow("SELECT COUNT(*) FROM neighbor_edges").Scan(&cnt)
	if err != nil {
		return false // table doesn't exist
	}
	return cnt > 0
}

// buildAndPersistEdges scans all packets in the store, extracts edges per
// ADVERT/non-ADVERT rules, and persists them to SQLite.
func buildAndPersistEdges(store *PacketStore, rw *sql.DB) int {
	store.mu.RLock()
	packets := make([]*StoreTx, len(store.packets))
	copy(packets, store.packets)
	store.mu.RUnlock()

	_, pm := store.getCachedNodesAndPM()

	tx, err := rw.Begin()
	if err != nil {
		log.Printf("[neighbor] begin tx error: %v", err)
		return 0
	}
	defer tx.Rollback()

	stmt, err := tx.Prepare(`INSERT INTO neighbor_edges (node_a, node_b, count, last_seen)
		VALUES (?, ?, 1, ?)
		ON CONFLICT(node_a, node_b) DO UPDATE SET
			count = count + 1, last_seen = MAX(last_seen, excluded.last_seen)`)
	if err != nil {
		log.Printf("[neighbor] prepare stmt error: %v", err)
		return 0
	}
	defer stmt.Close()

	edgeCount := 0
	var firstErr error
	for _, pkt := range packets {
		for _, obs := range pkt.Observations {
			for _, ec := range extractEdgesFromObs(obs, pkt, pm) {
				if _, err := stmt.Exec(ec.A, ec.B, ec.Timestamp); err != nil && firstErr == nil {
					firstErr = err
				}
				edgeCount++
			}
		}
	}
	if firstErr != nil {
		log.Printf("[neighbor] edge exec error (first): %v", firstErr)
	}

	if err := tx.Commit(); err != nil {
		log.Printf("[neighbor] commit error: %v", err)
		return 0
	}
	return edgeCount
}

// ─── resolved_path column ──────────────────────────────────────────────────────

// ensureResolvedPathColumn adds the resolved_path column to observations if missing.
func ensureResolvedPathColumn(dbPath string) error {
	rw, err := openRW(dbPath)
	if err != nil {
		return err
	}
	defer rw.Close()

	// Check if column already exists
	rows, err := rw.Query("PRAGMA table_info(observations)")
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var cid int
		var colName string
		var colType sql.NullString
		var notNull, pk int
		var dflt sql.NullString
		if rows.Scan(&cid, &colName, &colType, &notNull, &dflt, &pk) == nil && colName == "resolved_path" {
			return nil // already exists
		}
	}

	_, err = rw.Exec("ALTER TABLE observations ADD COLUMN resolved_path TEXT")
	if err != nil {
		return fmt.Errorf("add resolved_path column: %w", err)
	}
	log.Println("[store] Added resolved_path column to observations")
	return nil
}

// resolvePathForObs resolves hop prefixes to full pubkeys for an observation.
// Returns nil if path is empty.
func resolvePathForObs(pathJSON, observerID string, tx *StoreTx, pm *prefixMap, graph *NeighborGraph) []*string {
	hops := parsePathJSON(pathJSON)
	if len(hops) == 0 {
		return nil
	}

	// Build context pubkeys: observer + originator (if known)
	contextPKs := make([]string, 0, 3)
	if observerID != "" {
		contextPKs = append(contextPKs, strings.ToLower(observerID))
	}
	fromNode := extractFromNode(tx)
	if fromNode != "" {
		contextPKs = append(contextPKs, strings.ToLower(fromNode))
	}

	resolved := make([]*string, len(hops))
	for i, hop := range hops {
		// Add adjacent hops as context for disambiguation
		ctx := make([]string, len(contextPKs), len(contextPKs)+2)
		copy(ctx, contextPKs)
		// Add previously resolved hops as context
		if i > 0 && resolved[i-1] != nil {
			ctx = append(ctx, *resolved[i-1])
		}

		node, _, _ := pm.resolveWithContext(hop, ctx, graph)
		if node != nil {
			pk := strings.ToLower(node.PublicKey)
			resolved[i] = &pk
		}
	}

	return resolved
}

// marshalResolvedPath converts []*string to JSON for storage.
func marshalResolvedPath(rp []*string) string {
	if len(rp) == 0 {
		return ""
	}
	b, err := json.Marshal(rp)
	if err != nil {
		return ""
	}
	return string(b)
}

// unmarshalResolvedPath parses a resolved_path JSON string.
func unmarshalResolvedPath(s string) []*string {
	if s == "" {
		return nil
	}
	var result []*string
	if json.Unmarshal([]byte(s), &result) != nil {
		return nil
	}
	return result
}

// backfillResolvedPaths resolves paths for all observations that have NULL resolved_path.
func backfillResolvedPaths(store *PacketStore, dbPath string) int {
	// Collect pending observations and snapshot immutable fields under read lock.
	// graph is set in main.go before backfill is called; nil-safe throughout (review item #6).
	type obsRef struct {
		obsID       int
		pathJSON    string
		observerID  string
		txJSON      string // snapshot of DecodedJSON for extractFromNode
		payloadType *int
	}
	store.mu.RLock()
	pm := store.nodePM
	graph := store.graph
	var pending []obsRef
	for _, tx := range store.packets {
		for _, obs := range tx.Observations {
			if obs.ResolvedPath == nil && obs.PathJSON != "" && obs.PathJSON != "[]" {
				pending = append(pending, obsRef{
					obsID:       obs.ID,
					pathJSON:    obs.PathJSON,
					observerID:  obs.ObserverID,
					txJSON:      tx.DecodedJSON,
					payloadType: tx.PayloadType,
				})
			}
		}
	}
	store.mu.RUnlock()

	if len(pending) == 0 || pm == nil {
		return 0
	}

	// Resolve paths outside the lock — resolvePathForObs only reads pm and graph.
	type resolved struct {
		obsID  int
		rp     []*string
		rpJSON string
	}
	var results []resolved
	for _, ref := range pending {
		// Build a minimal StoreTx for extractFromNode (only needs DecodedJSON + PayloadType).
		fakeTx := &StoreTx{DecodedJSON: ref.txJSON, PayloadType: ref.payloadType}
		rp := resolvePathForObs(ref.pathJSON, ref.observerID, fakeTx, pm, graph)
		if len(rp) > 0 {
			rpJSON := marshalResolvedPath(rp)
			if rpJSON != "" {
				results = append(results, resolved{ref.obsID, rp, rpJSON})
			}
		}
	}

	if len(results) == 0 {
		return 0
	}

	// Persist to SQLite (no lock needed — separate RW connection).
	rw, err := openRW(dbPath)
	if err != nil {
		log.Printf("[store] backfill: open rw error: %v", err)
		return 0
	}
	defer rw.Close()

	sqlTx, err := rw.Begin()
	if err != nil {
		log.Printf("[store] backfill: begin tx error: %v", err)
		return 0
	}
	defer sqlTx.Rollback()

	stmt, err := sqlTx.Prepare("UPDATE observations SET resolved_path = ? WHERE id = ?")
	if err != nil {
		log.Printf("[store] backfill: prepare error: %v", err)
		return 0
	}
	defer stmt.Close()

	var firstErr error
	for _, r := range results {
		if _, err := stmt.Exec(r.rpJSON, r.obsID); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	if firstErr != nil {
		log.Printf("[store] backfill resolved_path exec error (first): %v", firstErr)
	}

	if err := sqlTx.Commit(); err != nil {
		log.Printf("[store] backfill: commit error: %v", err)
		return 0
	}

	// Update in-memory state under write lock.
	store.mu.Lock()
	count := 0
	for _, r := range results {
		if obs, ok := store.byObsID[r.obsID]; ok {
			obs.ResolvedPath = r.rp
			count++
		}
	}
	store.mu.Unlock()

	return count
}

// ─── Shared helpers ────────────────────────────────────────────────────────────

// edgeCandidate represents an extracted edge to be persisted.
type edgeCandidate struct {
	A, B, Timestamp string
}

// extractEdgesFromObs extracts neighbor edge candidates from a single observation.
// For ADVERTs: originator↔path[0] (if unambiguous). For ALL types: observer↔path[last] (if unambiguous).
// Also handles zero-hop ADVERTs (originator↔observer direct link).
func extractEdgesFromObs(obs *StoreObs, tx *StoreTx, pm *prefixMap) []edgeCandidate {
	isAdvert := tx.PayloadType != nil && *tx.PayloadType == 4
	fromNode := extractFromNode(tx)
	path := parsePathJSON(obs.PathJSON)
	observerPK := strings.ToLower(obs.ObserverID)
	ts := obs.Timestamp
	var edges []edgeCandidate

	if len(path) == 0 {
		if isAdvert && fromNode != "" {
			fromLower := strings.ToLower(fromNode)
			if fromLower != observerPK {
				a, b := fromLower, observerPK
				if a > b {
					a, b = b, a
				}
				edges = append(edges, edgeCandidate{a, b, ts})
			}
		}
		return edges
	}

	// Edge 1: originator ↔ path[0] — ADVERTs only (resolve prefix to full pubkey)
	if isAdvert && fromNode != "" && pm != nil {
		firstHop := strings.ToLower(path[0])
		fromLower := strings.ToLower(fromNode)
		candidates := pm.m[firstHop]
		if len(candidates) == 1 {
			resolved := strings.ToLower(candidates[0].PublicKey)
			if resolved != fromLower {
				a, b := fromLower, resolved
				if a > b {
					a, b = b, a
				}
				edges = append(edges, edgeCandidate{a, b, ts})
			}
		}
	}

	// Edge 2: observer ↔ path[last] — ALL packet types
	if pm != nil {
		lastHop := strings.ToLower(path[len(path)-1])
		candidates := pm.m[lastHop]
		if len(candidates) == 1 {
			resolved := strings.ToLower(candidates[0].PublicKey)
			if resolved != observerPK {
				a, b := observerPK, resolved
				if a > b {
					a, b = b, a
				}
				edges = append(edges, edgeCandidate{a, b, ts})
			}
		}
	}

	return edges
}
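makeEdgeKey is called in loadNeighborEdgesFromDB above but defined elsewhere in the tree; judging from the repeated a > b swaps in extractEdgesFromObs, its canonical form is presumably something like this sketch (both names here are assumptions):

// Sketch only — the real edgeKey/makeEdgeKey definitions are not in this diff.
type edgeKeySketch struct{ A, B string }

func makeEdgeKeySketch(a, b string) edgeKeySketch {
	if a > b {
		a, b = b, a // undirected edge: store endpoints in sorted order
	}
	return edgeKeySketch{A: a, B: b}
}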

// openRW opens a read-write SQLite connection (same pattern as PruneOldPackets).
func openRW(dbPath string) (*sql.DB, error) {
	dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=10000", dbPath)
	rw, err := sql.Open("sqlite", dsn)
	if err != nil {
		return nil, err
	}
	rw.SetMaxOpenConns(1)
	return rw, nil
}
@@ -0,0 +1,534 @@
package main

import (
	"database/sql"
	"encoding/json"
	"path/filepath"
	"strings"
	"testing"
	"time"

	_ "modernc.org/sqlite"
)

// createTestDBWithSchema creates a temp SQLite DB with the standard schema + resolved_path column.
func createTestDBWithSchema(t *testing.T) (*DB, string) {
	t.Helper()
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "test.db")

	conn, err := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
	if err != nil {
		t.Fatal(err)
	}

	// Create tables
	conn.Exec(`CREATE TABLE transmissions (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		raw_hex TEXT, hash TEXT UNIQUE, first_seen TEXT,
		route_type INTEGER, payload_type INTEGER, payload_version INTEGER,
		decoded_json TEXT
	)`)
	conn.Exec(`CREATE TABLE observers (
		id TEXT PRIMARY KEY, name TEXT, iata TEXT
	)`)
	conn.Exec(`CREATE TABLE observations (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		transmission_id INTEGER NOT NULL REFERENCES transmissions(id),
		observer_id TEXT, observer_name TEXT, direction TEXT,
		snr REAL, rssi REAL, score INTEGER,
		path_json TEXT, timestamp TEXT,
		resolved_path TEXT
	)`)
	conn.Exec(`CREATE TABLE nodes (
		public_key TEXT PRIMARY KEY, name TEXT, role TEXT,
		lat REAL, lon REAL, last_seen TEXT, first_seen TEXT,
		advert_count INTEGER DEFAULT 0
	)`)

	conn.Close()

	db, err := OpenDB(dbPath)
	if err != nil {
		t.Fatal(err)
	}
	return db, dbPath
}

func TestResolvePathForObs(t *testing.T) {
	// Build a prefix map with known nodes
	nodes := []nodeInfo{
		{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
		{PublicKey: "bbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-BB"},
	}
	pm := buildPrefixMap(nodes)
	graph := NewNeighborGraph()

	tx := &StoreTx{
		DecodedJSON: `{"pubKey": "originator1234567890"}`,
		PayloadType: intPtr(4),
	}

	// Unambiguous prefixes should resolve
	rp := resolvePathForObs(`["aa","bb"]`, "observer1", tx, pm, graph)
	if len(rp) != 2 {
		t.Fatalf("expected 2 resolved hops, got %d", len(rp))
	}
	if rp[0] == nil || !strings.HasPrefix(*rp[0], "aabbcc") {
		t.Errorf("expected first hop to resolve to Node-AA, got %v", rp[0])
	}
	if rp[1] == nil || !strings.HasPrefix(*rp[1], "bbccdd") {
		t.Errorf("expected second hop to resolve to Node-BB, got %v", rp[1])
	}
}

func TestResolvePathForObs_EmptyPath(t *testing.T) {
	pm := buildPrefixMap(nil)
	rp := resolvePathForObs(`[]`, "", &StoreTx{}, pm, nil)
	if rp != nil {
		t.Errorf("expected nil for empty path, got %v", rp)
	}

	rp = resolvePathForObs("", "", &StoreTx{}, pm, nil)
	if rp != nil {
		t.Errorf("expected nil for empty string, got %v", rp)
	}
}

func TestResolvePathForObs_Unresolvable(t *testing.T) {
	nodes := []nodeInfo{
		{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
	}
	pm := buildPrefixMap(nodes)

	// "zz" prefix doesn't match any node
	rp := resolvePathForObs(`["zz"]`, "", &StoreTx{}, pm, nil)
	if len(rp) != 1 {
		t.Fatalf("expected 1 hop, got %d", len(rp))
	}
	if rp[0] != nil {
		t.Errorf("expected nil for unresolvable hop, got %v", *rp[0])
	}
}

func TestMarshalUnmarshalResolvedPath(t *testing.T) {
	pk1 := "aabbccdd"
	var rp []*string
	rp = append(rp, &pk1, nil)

	j := marshalResolvedPath(rp)
	if j == "" {
		t.Fatal("expected non-empty JSON")
	}

	parsed := unmarshalResolvedPath(j)
	if len(parsed) != 2 {
		t.Fatalf("expected 2 elements, got %d", len(parsed))
	}
	if parsed[0] == nil || *parsed[0] != "aabbccdd" {
		t.Errorf("first element wrong: %v", parsed[0])
	}
	if parsed[1] != nil {
		t.Errorf("second element should be nil, got %v", *parsed[1])
	}
}

func TestMarshalResolvedPath_Empty(t *testing.T) {
	if marshalResolvedPath(nil) != "" {
		t.Error("expected empty for nil")
	}
	if marshalResolvedPath([]*string{}) != "" {
		t.Error("expected empty for empty slice")
	}
}

func TestUnmarshalResolvedPath_Invalid(t *testing.T) {
	if unmarshalResolvedPath("") != nil {
		t.Error("expected nil for empty string")
	}
	if unmarshalResolvedPath("not json") != nil {
		t.Error("expected nil for invalid JSON")
	}
}

func TestEnsureNeighborEdgesTable(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "test.db")

	// Create initial DB
	conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
	conn.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY)")
	conn.Close()

	if err := ensureNeighborEdgesTable(dbPath); err != nil {
		t.Fatal(err)
	}

	// Verify table exists
	conn, _ = sql.Open("sqlite", "file:"+dbPath+"?mode=ro")
	defer conn.Close()
	var cnt int
	if err := conn.QueryRow("SELECT COUNT(*) FROM neighbor_edges").Scan(&cnt); err != nil {
		t.Fatalf("neighbor_edges table not created: %v", err)
	}
}

func TestLoadNeighborEdgesFromDB(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "test.db")

	conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
	conn.Exec(`CREATE TABLE neighbor_edges (
		node_a TEXT NOT NULL, node_b TEXT NOT NULL,
		count INTEGER DEFAULT 1, last_seen TEXT,
		PRIMARY KEY (node_a, node_b)
	)`)
	conn.Exec("INSERT INTO neighbor_edges VALUES ('aaa', 'bbb', 5, '2024-01-01T00:00:00Z')")
	conn.Exec("INSERT INTO neighbor_edges VALUES ('ccc', 'ddd', 3, '2024-01-02T00:00:00Z')")

	g := loadNeighborEdgesFromDB(conn)
	conn.Close()

	// Should have 2 edges
	edges := g.AllEdges()
	if len(edges) != 2 {
		t.Errorf("expected 2 edges, got %d", len(edges))
	}

	// Check neighbors
	n := g.Neighbors("aaa")
	if len(n) != 1 {
		t.Errorf("expected 1 neighbor for aaa, got %d", len(n))
	}
}

func TestStoreObsResolvedPathInBroadcast(t *testing.T) {
	// Verify resolved_path appears in broadcast maps
	pk := "aabbccdd"
	obs := &StoreObs{
		ID:           1,
		ObserverID:   "obs1",
		ObserverName: "Observer 1",
		PathJSON:     `["aa"]`,
		ResolvedPath: []*string{&pk},
		Timestamp:    "2024-01-01T00:00:00Z",
	}

	tx := &StoreTx{
		ID:           1,
		Hash:         "abc123",
		Observations: []*StoreObs{obs},
	}
	pickBestObservation(tx)

	if tx.ResolvedPath == nil {
		t.Fatal("expected ResolvedPath to be set on tx after pickBestObservation")
	}
	if *tx.ResolvedPath[0] != "aabbccdd" {
		t.Errorf("expected resolved path to be aabbccdd, got %s", *tx.ResolvedPath[0])
	}
}

func TestResolvedPathInTxToMap(t *testing.T) {
	pk := "aabbccdd"
	tx := &StoreTx{
		ID:           1,
		Hash:         "abc123",
		PathJSON:     `["aa"]`,
		ResolvedPath: []*string{&pk},
|
||||
obsKeys: make(map[string]bool),
|
||||
}
|
||||
|
||||
m := txToMap(tx)
|
||||
rp, ok := m["resolved_path"]
|
||||
if !ok {
|
||||
t.Fatal("resolved_path not in txToMap output")
|
||||
}
|
||||
rpSlice, ok := rp.([]*string)
|
||||
if !ok || len(rpSlice) != 1 || *rpSlice[0] != "aabbccdd" {
|
||||
t.Errorf("unexpected resolved_path: %v", rp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvedPathOmittedWhenNil(t *testing.T) {
|
||||
tx := &StoreTx{
|
||||
ID: 1,
|
||||
Hash: "abc123",
|
||||
obsKeys: make(map[string]bool),
|
||||
}
|
||||
|
||||
m := txToMap(tx)
|
||||
if _, ok := m["resolved_path"]; ok {
|
||||
t.Error("resolved_path should not be in map when nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureResolvedPathColumn(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "test.db")
|
||||
|
||||
conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
|
||||
conn.Exec(`CREATE TABLE observations (
|
||||
id INTEGER PRIMARY KEY, transmission_id INTEGER,
|
||||
observer_id TEXT, path_json TEXT, timestamp TEXT
|
||||
)`)
|
||||
conn.Close()
|
||||
|
||||
if err := ensureResolvedPathColumn(dbPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify column exists
|
||||
conn, _ = sql.Open("sqlite", "file:"+dbPath+"?mode=ro")
|
||||
defer conn.Close()
|
||||
rows, _ := conn.Query("PRAGMA table_info(observations)")
|
||||
found := false
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var colName string
|
||||
var colType sql.NullString
|
||||
var notNull, pk int
|
||||
var dflt sql.NullString
|
||||
rows.Scan(&cid, &colName, &colType, ¬Null, &dflt, &pk)
|
||||
if colName == "resolved_path" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
if !found {
|
||||
t.Error("resolved_path column not added")
|
||||
}
|
||||
|
||||
// Running again should be idempotent
|
||||
if err := ensureResolvedPathColumn(dbPath); err != nil {
|
||||
t.Fatal("second call should be idempotent:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBDetectsResolvedPathColumn(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "test.db")
|
||||
|
||||
// Create DB without resolved_path
|
||||
conn, _ := sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
|
||||
conn.Exec(`CREATE TABLE observations (id INTEGER PRIMARY KEY, observer_idx INTEGER)`)
|
||||
conn.Exec(`CREATE TABLE transmissions (id INTEGER PRIMARY KEY)`)
|
||||
conn.Close()
|
||||
|
||||
db, err := OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if db.hasResolvedPath {
|
||||
t.Error("should not detect resolved_path when column missing")
|
||||
}
|
||||
db.Close()
|
||||
|
||||
// Add resolved_path column
|
||||
conn, _ = sql.Open("sqlite", "file:"+dbPath+"?_journal_mode=WAL")
|
||||
conn.Exec("ALTER TABLE observations ADD COLUMN resolved_path TEXT")
|
||||
conn.Close()
|
||||
|
||||
db, err = OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !db.hasResolvedPath {
|
||||
t.Error("should detect resolved_path when column exists")
|
||||
}
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func TestLoadWithResolvedPath(t *testing.T) {
|
||||
db, dbPath := createTestDBWithSchema(t)
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data
|
||||
rw, _ := openRW(dbPath)
|
||||
rw.Exec(`INSERT INTO transmissions (id, hash, first_seen, payload_type, decoded_json)
|
||||
VALUES (1, 'hash1', '2024-01-01T00:00:00Z', 4, '{"pubKey":"origpk"}')`)
|
||||
rw.Exec(`INSERT INTO observations (id, transmission_id, observer_id, observer_name, path_json, timestamp, resolved_path)
|
||||
VALUES (1, 1, 'obs1', 'Observer1', '["aa"]', '2024-01-01T00:00:00Z', '["aabbccdd"]')`)
|
||||
rw.Close()
|
||||
|
||||
store := NewPacketStore(db, nil)
|
||||
if err := store.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(store.packets) != 1 {
|
||||
t.Fatalf("expected 1 packet, got %d", len(store.packets))
|
||||
}
|
||||
|
||||
tx := store.packets[0]
|
||||
if len(tx.Observations) != 1 {
|
||||
t.Fatalf("expected 1 observation, got %d", len(tx.Observations))
|
||||
}
|
||||
|
||||
obs := tx.Observations[0]
|
||||
if obs.ResolvedPath == nil {
|
||||
t.Fatal("expected ResolvedPath to be loaded")
|
||||
}
|
||||
if len(obs.ResolvedPath) != 1 || *obs.ResolvedPath[0] != "aabbccdd" {
|
||||
t.Errorf("unexpected ResolvedPath: %v", obs.ResolvedPath)
|
||||
}
|
||||
|
||||
// Check that pickBestObservation propagated resolved_path to tx
|
||||
if tx.ResolvedPath == nil || len(tx.ResolvedPath) != 1 {
|
||||
t.Error("expected ResolvedPath to be propagated to tx")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvedPathInAPIResponse(t *testing.T) {
|
||||
// Test that TransmissionResp properly marshals resolved_path
|
||||
pk := "aabbccddee"
|
||||
resp := TransmissionResp{
|
||||
ID: 1,
|
||||
Hash: "test",
|
||||
ResolvedPath: []*string{&pk, nil},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var m map[string]interface{}
|
||||
json.Unmarshal(data, &m)
|
||||
|
||||
rp, ok := m["resolved_path"]
|
||||
if !ok {
|
||||
t.Fatal("resolved_path missing from JSON")
|
||||
}
|
||||
rpArr, ok := rp.([]interface{})
|
||||
if !ok || len(rpArr) != 2 {
|
||||
t.Fatalf("unexpected resolved_path shape: %v", rp)
|
||||
}
|
||||
if rpArr[0] != "aabbccddee" {
|
||||
t.Errorf("first element wrong: %v", rpArr[0])
|
||||
}
|
||||
if rpArr[1] != nil {
|
||||
t.Errorf("second element should be null: %v", rpArr[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvedPathOmittedWhenEmpty(t *testing.T) {
|
||||
resp := TransmissionResp{
|
||||
ID: 1,
|
||||
Hash: "test",
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(resp)
|
||||
var m map[string]interface{}
|
||||
json.Unmarshal(data, &m)
|
||||
|
||||
if _, ok := m["resolved_path"]; ok {
|
||||
t.Error("resolved_path should be omitted when nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractEdgesFromObs_AdvertNoPath(t *testing.T) {
|
||||
tx := &StoreTx{
|
||||
DecodedJSON: `{"pubKey":"aaaa1111"}`,
|
||||
PayloadType: intPtr(4),
|
||||
}
|
||||
obs := &StoreObs{
|
||||
ObserverID: "bbbb2222",
|
||||
PathJSON: "",
|
||||
Timestamp: "2024-01-01T00:00:00Z",
|
||||
}
|
||||
|
||||
edges := extractEdgesFromObs(obs, tx, nil)
|
||||
if len(edges) != 1 {
|
||||
t.Fatalf("expected 1 edge for zero-hop advert, got %d", len(edges))
|
||||
}
|
||||
// Canonical ordering: aaaa < bbbb
|
||||
if edges[0].A != "aaaa1111" || edges[0].B != "bbbb2222" {
|
||||
t.Errorf("unexpected edge: %+v", edges[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractEdgesFromObs_NonAdvertNoPath(t *testing.T) {
|
||||
tx := &StoreTx{PayloadType: intPtr(1)}
|
||||
obs := &StoreObs{ObserverID: "obs1", PathJSON: ""}
|
||||
edges := extractEdgesFromObs(obs, tx, nil)
|
||||
if len(edges) != 0 {
|
||||
t.Errorf("expected 0 edges for non-advert without path, got %d", len(edges))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractEdgesFromObs_WithPath(t *testing.T) {
|
||||
nodes := []nodeInfo{
|
||||
{PublicKey: "aabbccddee1234567890aabbccddee1234567890aabbccddee1234567890aabb", Name: "Node-AA"},
|
||||
{PublicKey: "ffgghhii1234567890aabbccddee1234567890aabbccddee1234567890aabb11", Name: "Node-FF"},
|
||||
}
|
||||
pm := buildPrefixMap(nodes)
|
||||
|
||||
tx := &StoreTx{
|
||||
DecodedJSON: `{"pubKey":"originator00"}`,
|
||||
PayloadType: intPtr(4),
|
||||
}
|
||||
obs := &StoreObs{
|
||||
ObserverID: "observer00",
|
||||
PathJSON: `["aa","ff"]`,
|
||||
Timestamp: "2024-01-01T00:00:00Z",
|
||||
}
|
||||
|
||||
edges := extractEdgesFromObs(obs, tx, pm)
|
||||
// Should get: originator↔aa (advert), observer↔ff (last hop)
|
||||
if len(edges) != 2 {
|
||||
t.Fatalf("expected 2 edges, got %d", len(edges))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractEdgesFromObs_SameNodeNoEdge(t *testing.T) {
|
||||
tx := &StoreTx{
|
||||
DecodedJSON: `{"pubKey":"same1234"}`,
|
||||
PayloadType: intPtr(4),
|
||||
}
|
||||
obs := &StoreObs{
|
||||
ObserverID: "same1234",
|
||||
PathJSON: "",
|
||||
Timestamp: "2024-01-01T00:00:00Z",
|
||||
}
|
||||
edges := extractEdgesFromObs(obs, tx, nil)
|
||||
if len(edges) != 0 {
|
||||
t.Errorf("expected 0 edges when originator == observer, got %d", len(edges))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
func TestPersistSemaphoreTryAcquireSkipsBatch(t *testing.T) {
|
||||
// Verify that persistSem is a buffered channel of size 1.
|
||||
if cap(persistSem) != 1 {
|
||||
t.Errorf("persistSem capacity = %d, want 1", cap(persistSem))
|
||||
}
|
||||
// Acquire the semaphore to simulate an in-progress persistence.
|
||||
persistSem <- struct{}{}
|
||||
|
||||
// asyncPersistResolvedPathsAndEdges should skip (not block, not
|
||||
// spawn a goroutine) when the semaphore is already held.
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
asyncPersistResolvedPathsAndEdges(
|
||||
"/nonexistent/path.db",
|
||||
[]persistObsUpdate{{obsID: 1, resolvedPath: "x"}},
|
||||
nil,
|
||||
"test",
|
||||
)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// If the function blocks on the semaphore instead of skipping,
|
||||
// this select will hit the timeout.
|
||||
select {
|
||||
case <-done:
|
||||
// Expected: returned immediately because semaphore was busy.
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
<-persistSem
|
||||
t.Fatal("asyncPersistResolvedPathsAndEdges blocked instead of skipping when semaphore was held")
|
||||
}
|
||||
|
||||
<-persistSem // release
|
||||
}
|
||||
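The behavior this test pins down is the classic non-blocking try-acquire on a buffered channel of capacity 1. A minimal sketch of that pattern, under the assumption that asyncPersistResolvedPathsAndEdges is shaped this way (persistSemSketch and tryPersistSketch below are illustrative names, not the repo's code):

// Sketch only: a try-acquire skip using a size-1 buffered channel.
// The real asyncPersistResolvedPathsAndEdges body is assumed, not copied.
var persistSemSketch = make(chan struct{}, 1)

func tryPersistSketch(work func()) bool {
    select {
    case persistSemSketch <- struct{}{}: // acquired: we are the only persister
        go func() {
            defer func() { <-persistSemSketch }() // release when done
            work()
        }()
        return true
    default: // already held: skip this batch instead of blocking
        return false
    }
}

The default arm is what makes the call return immediately while a batch is in flight, which is exactly the case the timeout in the test guards against.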
@@ -0,0 +1,134 @@
package main

import (
    "fmt"
    "testing"
)

// TestObsDedupCorrectness verifies that the map-based dedup produces correct
// results: no duplicate observations (same observerID + pathJSON) on a single
// transmission.
func TestObsDedupCorrectness(t *testing.T) {
    tx := &StoreTx{
        ID:      1,
        Hash:    "abc123",
        obsKeys: make(map[string]bool),
    }

    // Add 5 unique observations
    for i := 0; i < 5; i++ {
        obsID := fmt.Sprintf("obs-%d", i)
        pathJSON := fmt.Sprintf(`["path-%d"]`, i)
        dk := obsID + "|" + pathJSON
        if tx.obsKeys[dk] {
            t.Fatalf("observation %d should not be a duplicate", i)
        }
        tx.Observations = append(tx.Observations, &StoreObs{
            ID:         i,
            ObserverID: obsID,
            PathJSON:   pathJSON,
        })
        tx.obsKeys[dk] = true
        tx.ObservationCount++
    }

    if tx.ObservationCount != 5 {
        t.Fatalf("expected 5 observations, got %d", tx.ObservationCount)
    }

    // Try to add duplicates of each — all should be rejected
    for i := 0; i < 5; i++ {
        obsID := fmt.Sprintf("obs-%d", i)
        pathJSON := fmt.Sprintf(`["path-%d"]`, i)
        dk := obsID + "|" + pathJSON
        if !tx.obsKeys[dk] {
            t.Fatalf("observation %d should be detected as duplicate", i)
        }
    }

    // Same observer, different path — should NOT be a duplicate
    dk := "obs-0" + "|" + `["different-path"]`
    if tx.obsKeys[dk] {
        t.Fatal("different path should not be a duplicate")
    }

    // Different observer, same path — should NOT be a duplicate
    dk = "obs-new" + "|" + `["path-0"]`
    if tx.obsKeys[dk] {
        t.Fatal("different observer should not be a duplicate")
    }
}

// TestObsDedupNilMapSafety ensures obsKeys lazy init works for pre-existing
// transmissions that may not have the map initialized.
func TestObsDedupNilMapSafety(t *testing.T) {
    tx := &StoreTx{ID: 1, Hash: "abc"}
    // obsKeys is nil — the lazy init pattern used in IngestNewFromDB/IngestNewObservations
    if tx.obsKeys == nil {
        tx.obsKeys = make(map[string]bool)
    }
    dk := "obs1|path1"
    if tx.obsKeys[dk] {
        t.Fatal("should not be duplicate on empty map")
    }
    tx.obsKeys[dk] = true
    if !tx.obsKeys[dk] {
        t.Fatal("should be duplicate after insert")
    }
}

// BenchmarkObsDedupMap benchmarks the map-based O(1) dedup approach.
func BenchmarkObsDedupMap(b *testing.B) {
    for _, obsCount := range []int{10, 50, 100, 500} {
        b.Run(fmt.Sprintf("obs=%d", obsCount), func(b *testing.B) {
            // Pre-populate a tx with obsCount observations
            tx := &StoreTx{
                ID:      1,
                obsKeys: make(map[string]bool),
            }
            for i := 0; i < obsCount; i++ {
                obsID := fmt.Sprintf("obs-%d", i)
                pathJSON := fmt.Sprintf(`["hop-%d"]`, i)
                dk := obsID + "|" + pathJSON
                tx.Observations = append(tx.Observations, &StoreObs{
                    ObserverID: obsID,
                    PathJSON:   pathJSON,
                })
                tx.obsKeys[dk] = true
            }

            // Benchmark: check dedup for a new observation (not duplicate)
            newDK := "new-obs|new-path"
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                _ = tx.obsKeys[newDK]
            }
        })
    }
}

// BenchmarkObsDedupLinear benchmarks the old O(n) linear scan for comparison.
func BenchmarkObsDedupLinear(b *testing.B) {
    for _, obsCount := range []int{10, 50, 100, 500} {
        b.Run(fmt.Sprintf("obs=%d", obsCount), func(b *testing.B) {
            tx := &StoreTx{ID: 1}
            for i := 0; i < obsCount; i++ {
                tx.Observations = append(tx.Observations, &StoreObs{
                    ObserverID: fmt.Sprintf("obs-%d", i),
                    PathJSON:   fmt.Sprintf(`["hop-%d"]`, i),
                })
            }

            newObsID := "new-obs"
            newPath := "new-path"
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                for _, existing := range tx.Observations {
                    if existing.ObserverID == newObsID && existing.PathJSON == newPath {
                        break
                    }
                }
            }
        })
    }
}
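The dedup key exercised throughout this file is observerID + "|" + pathJSON. A sketch of how an ingest path could consult the map; addObs is a hypothetical helper (the types, the lazy init, and the key format come from the tests above):

// Sketch under assumptions: O(1) dedup at ingest time.
func addObs(tx *StoreTx, obs *StoreObs) bool {
    if tx.obsKeys == nil { // lazy init for pre-existing transmissions
        tx.obsKeys = make(map[string]bool)
    }
    dk := obs.ObserverID + "|" + obs.PathJSON
    if tx.obsKeys[dk] {
        return false // duplicate: same observer reporting the same path
    }
    tx.obsKeys[dk] = true
    tx.Observations = append(tx.Observations, obs)
    tx.ObservationCount++
    return true
}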
@@ -166,6 +166,7 @@ func TestResolveHopsAPI_UniquePrefix(t *testing.T) {
    // Insert a unique node
    srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
        "ff11223344", "UniqueNode", 37.0, -122.0)
+   srv.store.InvalidateNodeCache()

    req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ff11223344", nil)
    rr := httptest.NewRecorder()
@@ -192,6 +193,7 @@ func TestResolveHopsAPI_AmbiguousNoContext(t *testing.T) {
        "ee1aaaaaaa", "Node-E1", 37.0, -122.0)
    srv.db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, lat, lon) VALUES (?, ?, ?, ?)",
        "ee1bbbbbbb", "Node-E2", 38.0, -121.0)
+   srv.store.InvalidateNodeCache()

    req := httptest.NewRequest("GET", "/api/resolve-hops?hops=ee1", nil)
    rr := httptest.NewRecorder()
@@ -204,8 +206,10 @@
    if hr == nil {
        t.Fatal("expected hop in resolved map")
    }
-   if hr.Confidence != "ambiguous" {
-       t.Fatalf("expected ambiguous, got %s", hr.Confidence)
+   // With both candidates having GPS and no affinity context, the resolver
+   // picks the GPS-preferred candidate → confidence is "gps_preference".
+   if hr.Confidence != "gps_preference" {
+       t.Fatalf("expected gps_preference, got %s", hr.Confidence)
    }
    if len(hr.Candidates) != 2 {
        t.Fatalf("expected 2 candidates, got %d", len(hr.Candidates))
+260 -55
@@ -118,6 +118,7 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
    r.Handle("/api/debug/affinity", s.requireAPIKey(http.HandlerFunc(s.handleDebugAffinity))).Methods("GET")

    // Packet endpoints
+   r.HandleFunc("/api/packets/observations", s.handleBatchObservations).Methods("POST")
    r.HandleFunc("/api/packets/timestamps", s.handlePacketTimestamps).Methods("GET")
    r.HandleFunc("/api/packets/{id}", s.handlePacketDetail).Methods("GET")
    r.HandleFunc("/api/packets", s.handlePackets).Methods("GET")
@@ -145,6 +146,7 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
    r.HandleFunc("/api/analytics/hash-sizes", s.handleAnalyticsHashSizes).Methods("GET")
    r.HandleFunc("/api/analytics/hash-collisions", s.handleAnalyticsHashCollisions).Methods("GET")
    r.HandleFunc("/api/analytics/subpaths", s.handleAnalyticsSubpaths).Methods("GET")
+   r.HandleFunc("/api/analytics/subpaths-bulk", s.handleAnalyticsSubpathsBulk).Methods("GET")
    r.HandleFunc("/api/analytics/subpath-detail", s.handleAnalyticsSubpathDetail).Methods("GET")
    r.HandleFunc("/api/analytics/neighbor-graph", s.handleNeighborGraph).Methods("GET")

@@ -152,6 +154,8 @@ func (s *Server) RegisterRoutes(r *mux.Router) {
    r.HandleFunc("/api/resolve-hops", s.handleResolveHops).Methods("GET")
    r.HandleFunc("/api/channels/{hash}/messages", s.handleChannelMessages).Methods("GET")
    r.HandleFunc("/api/channels", s.handleChannels).Methods("GET")
+   r.HandleFunc("/api/observers/metrics/summary", s.handleMetricsSummary).Methods("GET")
+   r.HandleFunc("/api/observers/{id}/metrics", s.handleObserverMetrics).Methods("GET")
    r.HandleFunc("/api/observers/{id}/analytics", s.handleObserverAnalytics).Methods("GET")
    r.HandleFunc("/api/observers/{id}", s.handleObserverDetail).Methods("GET")
    r.HandleFunc("/api/observers", s.handleObservers).Methods("GET")
@@ -718,7 +722,8 @@ func (s *Server) handlePackets(w http.ResponseWriter, r *http.Request) {
    Until:  r.URL.Query().Get("until"),
    Region: r.URL.Query().Get("region"),
    Node:   r.URL.Query().Get("node"),
    Order:  "DESC",
+   ExpandObservations: r.URL.Query().Get("expand") == "observations",
    }
    if r.URL.Query().Get("order") == "asc" {
        q.Order = "ASC"
@@ -760,13 +765,6 @@ func (s *Server) handlePackets(w http.ResponseWriter, r *http.Request) {
        return
    }

-   // Strip observations from default response
-   if r.URL.Query().Get("expand") != "observations" {
-       for _, p := range result.Packets {
-           delete(p, "observations")
-       }
-   }
-
    writeJSON(w, result)
}

@@ -791,6 +789,38 @@ var muxBraceParam = regexp.MustCompile(`\{([^}]+)\}`)
// perfHexFallback matches hex IDs for perf path normalization fallback.
var perfHexFallback = regexp.MustCompile(`[0-9a-f]{8,}`)

+// handleBatchObservations returns observations for multiple hashes in a single request.
+// POST /api/packets/observations with JSON body: {"hashes": ["abc123", "def456", ...]}
+// Response: {"results": {"abc123": [...observations...], "def456": [...], ...}}
+// Limited to 200 hashes per request to prevent abuse.
+func (s *Server) handleBatchObservations(w http.ResponseWriter, r *http.Request) {
+   var body struct {
+       Hashes []string `json:"hashes"`
+   }
+   if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
+       writeError(w, 400, "invalid JSON body")
+       return
+   }
+   const maxHashes = 200
+   if len(body.Hashes) > maxHashes {
+       writeError(w, 400, fmt.Sprintf("too many hashes (max %d)", maxHashes))
+       return
+   }
+   if len(body.Hashes) == 0 {
+       writeJSON(w, map[string]interface{}{"results": map[string]interface{}{}})
+       return
+   }
+
+   results := make(map[string][]ObservationResp, len(body.Hashes))
+   if s.store != nil {
+       for _, hash := range body.Hashes {
+           obs := s.store.GetObservationsForHash(hash)
+           results[hash] = mapSliceToObservations(obs)
+       }
+   }
+   writeJSON(w, map[string]interface{}{"results": results})
+}
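A sketch of calling the batch endpoint from a Go client. The base URL is an assumption (the tests use Config{Port: 3000}, so localhost:3000 is plausible); the request and response shapes follow the handler comment above. Imports assumed: bytes, encoding/json, net/http.

// Sketch only: fetchBatchObservations is an illustrative client helper,
// not part of the repo.
func fetchBatchObservations(hashes []string) (map[string][]json.RawMessage, error) {
    payload, _ := json.Marshal(map[string][]string{"hashes": hashes}) // max 200 per request
    resp, err := http.Post("http://localhost:3000/api/packets/observations",
        "application/json", bytes.NewReader(payload))
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    var body struct {
        Results map[string][]json.RawMessage `json:"results"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
        return nil, err
    }
    return body.Results, nil
}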
func (s *Server) handlePacketDetail(w http.ResponseWriter, r *http.Request) {
    param := mux.Vars(r)["id"]
    var packet map[string]interface{}
@@ -1065,16 +1095,44 @@ func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
        return
    }

-   prefix1 := strings.ToLower(pubkey)
-   if len(prefix1) > 2 {
-       prefix1 = prefix1[:2]
-   }
-   prefix2 := strings.ToLower(pubkey)
+   // Use the precomputed byPathHop index instead of scanning all packets.
+   // Look up by full pubkey (resolved hops) and by short prefixes (raw hops).
+   lowerPK := strings.ToLower(pubkey)
+   prefix2 := lowerPK
    if len(prefix2) > 4 {
        prefix2 = prefix2[:4]
    }
+   prefix1 := lowerPK
+   if len(prefix1) > 2 {
+       prefix1 = prefix1[:2]
+   }

    s.store.mu.RLock()
    _, pm := s.store.getCachedNodesAndPM()

+   // Collect candidate transmissions from the index, deduplicating by tx ID.
+   seen := make(map[int]bool)
+   var candidates []*StoreTx
+   addCandidates := func(key string) {
+       for _, tx := range s.store.byPathHop[key] {
+           if !seen[tx.ID] {
+               seen[tx.ID] = true
+               candidates = append(candidates, tx)
+           }
+       }
+   }
+   addCandidates(lowerPK) // full pubkey match (from resolved_path)
+   addCandidates(prefix1) // 2-char raw hop match
+   addCandidates(prefix2) // 4-char raw hop match
+   // Also check any raw hops that start with prefix2 (longer prefixes).
+   // Raw hops are typically 2 chars, so iterate only keys with HasPrefix
+   // on the small set of index keys rather than all packets.
+   for key := range s.store.byPathHop {
+       if len(key) > 4 && len(key) < len(lowerPK) && strings.HasPrefix(key, prefix2) {
+           addCandidates(key)
+       }
+   }

    type pathAgg struct {
        Hops  []PathHopResp
        Count int
@@ -1088,28 +1146,13 @@ func (s *Server) handleNodePaths(w http.ResponseWriter, r *http.Request) {
        if cached, ok := hopCache[hop]; ok {
            return cached
        }
-       r := pm.resolve(hop)
+       r, _, _ := pm.resolveWithContext(hop, nil, s.store.graph)
        hopCache[hop] = r
        return r
    }
-   for _, tx := range s.store.packets {
-       hops := txGetParsedPath(tx)
-       if len(hops) == 0 {
-           continue
-       }
-       found := false
-       for _, hop := range hops {
-           hl := strings.ToLower(hop)
-           if hl == prefix1 || hl == prefix2 || strings.HasPrefix(hl, prefix2) {
-               found = true
-               break
-           }
-       }
-       if !found {
-           continue
-       }
-
+   for _, tx := range candidates {
        totalTransmissions++
        hops := txGetParsedPath(tx)
        resolvedHops := make([]PathHopResp, len(hops))
        sigParts := make([]string, len(hops))
        for i, hop := range hops {
@@ -1337,6 +1380,57 @@ func (s *Server) handleAnalyticsSubpaths(w http.ResponseWriter, r *http.Request) {
    })
}

+// handleAnalyticsSubpathsBulk returns multiple length-range buckets in a single
+// response, avoiding repeated scans of the same packet data. Query format:
+// ?groups=2-2:50,3-3:30,4-4:20,5-8:15 (minLen-maxLen:limit per group)
+func (s *Server) handleAnalyticsSubpathsBulk(w http.ResponseWriter, r *http.Request) {
+   region := r.URL.Query().Get("region")
+   groupsParam := r.URL.Query().Get("groups")
+   if groupsParam == "" {
+       writeJSON(w, ErrorResp{Error: "groups parameter required (e.g. groups=2-2:50,3-3:30)"})
+       return
+   }
+
+   var groups []subpathGroup
+   for _, g := range strings.Split(groupsParam, ",") {
+       parts := strings.SplitN(g, ":", 2)
+       if len(parts) != 2 {
+           writeJSON(w, ErrorResp{Error: "invalid group format: " + g})
+           return
+       }
+       rangeParts := strings.SplitN(parts[0], "-", 2)
+       if len(rangeParts) != 2 {
+           writeJSON(w, ErrorResp{Error: "invalid range format: " + parts[0]})
+           return
+       }
+       mn, err1 := strconv.Atoi(rangeParts[0])
+       mx, err2 := strconv.Atoi(rangeParts[1])
+       lim, err3 := strconv.Atoi(parts[1])
+       if err1 != nil || err2 != nil || err3 != nil || mn < 2 || mx < mn || lim < 1 {
+           writeJSON(w, ErrorResp{Error: "invalid group: " + g})
+           return
+       }
+       groups = append(groups, subpathGroup{mn, mx, lim})
+   }
+
+   if s.store == nil {
+       results := make([]map[string]interface{}, len(groups))
+       for i := range groups {
+           results[i] = map[string]interface{}{"subpaths": []interface{}{}, "totalPaths": 0}
+       }
+       writeJSON(w, map[string]interface{}{"results": results})
+       return
+   }
+
+   results := s.store.GetAnalyticsSubpathsBulk(region, groups)
+   writeJSON(w, map[string]interface{}{"results": results})
+}
+
+// subpathGroup defines a length-range + limit for the bulk subpaths endpoint.
+type subpathGroup struct {
+   MinLen, MaxLen, Limit int
+}
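Going the other way, a client can serialize subpathGroup values back into the groups query format the handler parses. A small illustrative helper (not part of the repo; uses fmt and strings):

// Sketch only: bulkSubpathsURL builds a ?groups= query from subpathGroup values.
func bulkSubpathsURL(groups []subpathGroup) string {
    parts := make([]string, len(groups))
    for i, g := range groups {
        parts[i] = fmt.Sprintf("%d-%d:%d", g.MinLen, g.MaxLen, g.Limit) // minLen-maxLen:limit
    }
    return "/api/analytics/subpaths-bulk?groups=" + strings.Join(parts, ",")
}

For example, bulkSubpathsURL([]subpathGroup{{2, 2, 50}, {3, 3, 30}, {5, 8, 15}}) yields the same query string the TestAnalyticsSubpathsBulk request below uses.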
func (s *Server) handleAnalyticsSubpathDetail(w http.ResponseWriter, r *http.Request) {
    hops := r.URL.Query().Get("hops")
    if hops == "" {
@@ -1406,24 +1500,25 @@ func (s *Server) handleResolveHops(w http.ResponseWriter, r *http.Request) {
        continue
    }
    hopLower := strings.ToLower(hop)
-   rows, err := s.db.conn.Query("SELECT public_key, name, lat, lon FROM nodes WHERE LOWER(public_key) LIKE ?", hopLower+"%")
-   if err != nil {
-       resolved[hop] = &HopResolution{Name: nil, Candidates: []HopCandidate{}, Conflicts: []interface{}{}, Confidence: "ambiguous"}
-       continue
-   }

+   // Resolve candidates from the in-memory prefix map instead of
+   // issuing per-hop DB queries (fixes N+1 pattern, see #369).
    var candidates []HopCandidate
-   for rows.Next() {
-       var pk string
-       var name sql.NullString
-       var lat, lon sql.NullFloat64
-       rows.Scan(&pk, &name, &lat, &lon)
-       candidates = append(candidates, HopCandidate{
-           Name: nullStr(name), Pubkey: pk,
-           Lat: nullFloat(lat), Lon: nullFloat(lon),
-       })
-   }
+   if pm != nil {
+       if matched, ok := pm.m[hopLower]; ok {
+           for _, ni := range matched {
+               c := HopCandidate{Pubkey: ni.PublicKey}
+               if ni.Name != "" {
+                   c.Name = ni.Name
+               }
+               if ni.HasGPS {
+                   c.Lat = ni.Lat
+                   c.Lon = ni.Lon
+               }
+               candidates = append(candidates, c)
+           }
+       }
+   }
-   rows.Close()

    if len(candidates) == 0 {
        resolved[hop] = &HopResolution{Name: nil, Candidates: []HopCandidate{}, Conflicts: []interface{}{}, Confidence: "no_match"}
@@ -1546,8 +1641,12 @@ func (s *Server) handleObservers(w http.ResponseWriter, r *http.Request) {
    oneHourAgo := time.Now().Add(-1 * time.Hour).Unix()
    pktCounts := s.db.GetObserverPacketCounts(oneHourAgo)

-   // Batch lookup: node locations (observer ID may match a node public_key)
-   nodeLocations := s.db.GetNodeLocations()
+   // Batch lookup: node locations only for observer IDs (not all nodes)
+   observerIDs := make([]string, len(observers))
+   for i, o := range observers {
+       observerIDs[i] = o.ID
+   }
+   nodeLocations := s.db.GetNodeLocationsByKeys(observerIDs)

    result := make([]ObserverResp, 0, len(observers))
    for _, o := range observers {
@@ -1958,13 +2057,7 @@ func percentile(sorted []float64, p float64) float64 {
func sortedCopy(arr []float64) []float64 {
    cp := make([]float64, len(arr))
    copy(cp, arr)
-   for i := 0; i < len(cp); i++ {
-       for j := i + 1; j < len(cp); j++ {
-           if cp[j] < cp[i] {
-               cp[i], cp[j] = cp[j], cp[i]
-           }
-       }
-   }
+   sort.Float64s(cp)
    return cp
}

@@ -2003,6 +2096,9 @@ func mapSliceToTransmissions(maps []map[string]interface{}) []TransmissionResp {
    tx.PathJSON = m["path_json"]
    tx.Direction = m["direction"]
    tx.Score = m["score"]
+   if rp, ok := m["resolved_path"].([]*string); ok {
+       tx.ResolvedPath = rp
+   }
    result = append(result, tx)
}
return result
@@ -2024,6 +2120,9 @@ func mapSliceToObservations(maps []map[string]interface{}) []ObservationResp {
    obs.RSSI = m["rssi"]
    obs.PathJSON = m["path_json"]
    obs.Timestamp = m["timestamp"]
+   if rp, ok := m["resolved_path"].([]*string); ok {
+       obs.ResolvedPath = rp
+   }
    result = append(result, obs)
}
return result
@@ -2056,6 +2155,112 @@ func nullFloatVal(n sql.NullFloat64) float64 {
    return 0
}

+func (s *Server) handleObserverMetrics(w http.ResponseWriter, r *http.Request) {
+   id := mux.Vars(r)["id"]
+   since := r.URL.Query().Get("since")
+   until := r.URL.Query().Get("until")
+   resolution := r.URL.Query().Get("resolution")
+
+   // Default to last 24h if no since provided
+   if since == "" {
+       since = time.Now().UTC().Add(-24 * time.Hour).Format(time.RFC3339)
+   }
+
+   // Validate resolution
+   if resolution == "" {
+       resolution = "5m"
+   }
+   switch resolution {
+   case "5m", "1h", "1d":
+       // valid
+   default:
+       writeError(w, 400, "invalid resolution: "+resolution+". Must be 5m, 1h, or 1d")
+       return
+   }
+
+   // Sample interval (default 300s = 5min)
+   sampleInterval := 300
+
+   metrics, reboots, err := s.db.GetObserverMetrics(id, since, until, resolution, sampleInterval)
+   if err != nil {
+       writeError(w, 500, err.Error())
+       return
+   }
+   if metrics == nil {
+       metrics = []MetricsSample{}
+   }
+   if reboots == nil {
+       reboots = []string{}
+   }
+
+   // Get observer name
+   obs, _ := s.db.GetObserverByID(id)
+   var name *string
+   if obs != nil {
+       name = obs.Name
+   }
+
+   writeJSON(w, map[string]interface{}{
+       "observer_id":   id,
+       "observer_name": name,
+       "reboots":       reboots,
+       "metrics":       metrics,
+   })
+}
+
+func (s *Server) handleMetricsSummary(w http.ResponseWriter, r *http.Request) {
+   window := r.URL.Query().Get("window")
+   if window == "" {
+       window = "24h"
+   }
+   region := r.URL.Query().Get("region")
+
+   // Parse window duration
+   dur, err := parseWindowDuration(window)
+   if err != nil {
+       writeError(w, 400, "invalid window: "+window)
+       return
+   }
+
+   since := time.Now().UTC().Add(-dur).Format(time.RFC3339)
+   summary, err := s.db.GetMetricsSummary(since)
+   if err != nil {
+       writeError(w, 500, err.Error())
+       return
+   }
+   if summary == nil {
+       summary = []MetricsSummaryRow{}
+   }
+
+   // Filter by region if specified
+   if region != "" {
+       filtered := make([]MetricsSummaryRow, 0)
+       for _, row := range summary {
+           if strings.EqualFold(row.IATA, region) {
+               filtered = append(filtered, row)
+           }
+       }
+       summary = filtered
+   }
+
+   writeJSON(w, map[string]interface{}{
+       "observers": summary,
+   })
+}
+
+// parseWindowDuration parses strings like "24h", "3d", "7d", "30d".
+func parseWindowDuration(window string) (time.Duration, error) {
+   if strings.HasSuffix(window, "d") {
+       daysStr := strings.TrimSuffix(window, "d")
+       days, err := strconv.Atoi(daysStr)
+       if err != nil || days <= 0 {
+           return 0, fmt.Errorf("invalid days: %s", daysStr)
+       }
+       return time.Duration(days) * 24 * time.Hour, nil
+   }
+   return time.ParseDuration(window)
+}
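Example values for the parser above, shown as an illustrative (not checked-in) function; plain hour strings fall through to time.ParseDuration, while the "d" suffix takes the day branch:

// Illustrative only: expected outputs of parseWindowDuration.
func demoWindowDurations() {
    d1, _ := parseWindowDuration("24h") // 24h0m0s via time.ParseDuration
    d2, _ := parseWindowDuration("3d")  // 72h0m0s via the day-suffix branch
    _, err := parseWindowDuration("0d") // error: days must be positive
    fmt.Println(d1, d2, err)
}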
func (s *Server) handleAdminPrune(w http.ResponseWriter, r *http.Request) {
    days := 0
    if d := r.URL.Query().Get("days"); d != "" {

+447 -12
@@ -1105,6 +1105,63 @@ func TestAnalyticsSubpaths(t *testing.T) {
    }
}

+func TestAnalyticsSubpathsBulk(t *testing.T) {
+   _, router := setupTestServer(t)
+
+   // Valid request with multiple groups.
+   req := httptest.NewRequest("GET", "/api/analytics/subpaths-bulk?groups=2-2:50,3-3:30,5-8:15", nil)
+   w := httptest.NewRecorder()
+   router.ServeHTTP(w, req)
+   if w.Code != 200 {
+       t.Fatalf("expected 200, got %d", w.Code)
+   }
+   var body map[string]interface{}
+   json.Unmarshal(w.Body.Bytes(), &body)
+   results, ok := body["results"].([]interface{})
+   if !ok {
+       t.Fatal("expected results array")
+   }
+   if len(results) != 3 {
+       t.Errorf("expected 3 result groups, got %d", len(results))
+   }
+   // Each result should have subpaths and totalPaths.
+   for i, r := range results {
+       rm, ok := r.(map[string]interface{})
+       if !ok {
+           t.Fatalf("result %d not a map", i)
+       }
+       if _, ok := rm["subpaths"]; !ok {
+           t.Errorf("result %d missing subpaths", i)
+       }
+       if _, ok := rm["totalPaths"]; !ok {
+           t.Errorf("result %d missing totalPaths", i)
+       }
+   }
+
+   // Missing groups param → error.
+   req2 := httptest.NewRequest("GET", "/api/analytics/subpaths-bulk", nil)
+   w2 := httptest.NewRecorder()
+   router.ServeHTTP(w2, req2)
+   if w2.Code != 200 {
+       t.Fatalf("expected 200 with error body, got %d", w2.Code)
+   }
+   var errBody map[string]interface{}
+   json.Unmarshal(w2.Body.Bytes(), &errBody)
+   if _, ok := errBody["error"]; !ok {
+       t.Error("expected error field for missing groups param")
+   }
+
+   // Invalid group format.
+   req3 := httptest.NewRequest("GET", "/api/analytics/subpaths-bulk?groups=bad", nil)
+   w3 := httptest.NewRecorder()
+   router.ServeHTTP(w3, req3)
+   var errBody3 map[string]interface{}
+   json.Unmarshal(w3.Body.Bytes(), &errBody3)
+   if _, ok := errBody3["error"]; !ok {
+       t.Error("expected error for invalid group format")
+   }
+}
+
func TestAnalyticsSubpathDetailWithHops(t *testing.T) {
    _, router := setupTestServer(t)
    req := httptest.NewRequest("GET", "/api/analytics/subpath-detail?hops=aa,bb", nil)
@@ -1170,6 +1227,11 @@ func TestResolveHopsAmbiguous(t *testing.T) {
    cfg := &Config{Port: 3000}
    hub := NewHub()
    srv := NewServer(db, cfg, hub)
+   store := NewPacketStore(db, nil)
+   if err := store.Load(); err != nil {
+       t.Fatalf("store.Load failed: %v", err)
+   }
+   srv.store = store
    router := mux.NewRouter()
    srv.RegisterRoutes(router)

@@ -2105,7 +2167,7 @@ tx := &StoreTx{
    ID:          9000 + i,
    RawHex:      rawHex,
    Hash:        "testhash" + strconv.Itoa(i),
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
}
@@ -2151,7 +2213,7 @@ for i, raw := range raws {
    ID:          8000 + i,
    RawHex:      raw,
    Hash:        "dominant" + strconv.Itoa(i),
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
}
@@ -2190,12 +2252,13 @@ func TestGetNodeHashSizeInfoLatestWins(t *testing.T) {
    // 4 historical 1-byte adverts, then 1 recent 2-byte advert (latest).
    // Mode would pick 1 (majority), but latest-wins should pick 2.
    raws := []string{raw1byte, raw1byte, raw1byte, raw1byte, raw2byte}
+   baseTime := time.Now().UTC().Add(-1 * time.Hour)
    for i, raw := range raws {
        tx := &StoreTx{
            ID:          7000 + i,
            RawHex:      raw,
            Hash:        "latest" + strconv.Itoa(i),
-           FirstSeen:   "2024-01-01T0" + strconv.Itoa(i) + ":00:00Z",
+           FirstSeen:   baseTime.Add(time.Duration(i) * time.Minute).Format("2006-01-02T15:04:05.000Z"),
            PayloadType: &payloadType,
            DecodedJSON: decoded,
        }
@@ -2236,12 +2299,13 @@ func TestGetNodeHashSizeInfoIgnoreDirectZeroHop(t *testing.T) {

    payloadType := 4
    raws := []string{rawFlood2B, rawDirect0, rawFlood2B, rawDirect0, rawFlood2B}
+   baseTime2 := time.Now().UTC().Add(-1 * time.Hour)
    for i, raw := range raws {
        tx := &StoreTx{
            ID:          9150 + i,
            RawHex:      raw,
            Hash:        "dirignore" + strconv.Itoa(i),
-           FirstSeen:   "2024-01-01T0" + strconv.Itoa(i) + ":00:00Z",
+           FirstSeen:   baseTime2.Add(time.Duration(i) * time.Minute).Format("2006-01-02T15:04:05.000Z"),
            PayloadType: &payloadType,
            DecodedJSON: decoded,
        }
@@ -2284,7 +2348,7 @@ func TestGetNodeHashSizeInfoOnlyDirectZeroHopIgnored(t *testing.T) {
    ID:          9160,
    RawHex:      rawDirect0,
    Hash:        "onlydirect0",
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
}
@@ -2320,7 +2384,7 @@ func TestGetNodeHashSizeInfoDirectNonZeroHopCounted(t *testing.T) {
    ID:          9170,
    RawHex:      rawDirectNonZero,
    Hash:        "dirnonzero0",
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
}
@@ -2355,7 +2419,7 @@ func TestGetNodeHashSizeInfoNoAdverts(t *testing.T) {
    ID:          6000,
    RawHex:      "0440aabb",
    Hash:        "noadverts0",
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: `{"pubKey":"` + pk + `"}`,
}
@@ -2397,7 +2461,7 @@ func TestHashAnalyticsZeroHopAdvert(t *testing.T) {
    ID:          8000,
    RawHex:      raw,
    Hash:        "zerohop0",
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
    // No PathJSON → txGetParsedPath returns nil (zero hops)
@@ -2451,7 +2515,7 @@ func TestAnalyticsHashSizeSameNameDifferentPubkey(t *testing.T) {
    ID:          6100 + i,
    RawHex:      raw2byte,
    Hash:        "samename" + strconv.Itoa(i),
-   FirstSeen:   "2024-01-01T00:00:00Z",
+   FirstSeen:   time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
    PayloadType: &payloadType,
    DecodedJSON: decoded,
    PathJSON:    `["AABB"]`,
@@ -2491,6 +2555,158 @@
    t.Errorf("field %q is null, expected []", field)
    }
}

+func TestInconsistentNodesExcludesCompanions(t *testing.T) {
+   // Issue #566: inconsistentNodes should only include repeaters and room servers.
+   db := setupTestDB(t)
+   seedTestData(t, db)
+   store := NewPacketStore(db, nil)
+   if err := store.Load(); err != nil {
+       t.Fatalf("store.Load failed: %v", err)
+   }
+
+   now := time.Now().UTC().Format("2006-01-02T15:04:05.000Z")
+   payloadType := 4
+
+   // Create three nodes: repeater, room_server, companion — all with inconsistent hash sizes
+   nodes := []struct {
+       pk   string
+       role string
+   }{
+       {"aa11111111111111111111111111111111111111111111111111111111111111", "repeater"},
+       {"bb22222222222222222222222222222222222222222222222222222222222222", "room_server"},
+       {"cc33333333333333333333333333333333333333333333333333333333333333", "companion"},
+   }
+
+   for ni, n := range nodes {
+       db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, ?, ?)", n.pk, "Node-"+n.role, n.role)
+       decoded := `{"name":"Node-` + n.role + `","pubKey":"` + n.pk + `"}`
+       // Create flip-flop pattern: 1-byte, 2-byte, 1-byte (transitions=2 → inconsistent)
+       // Use header 0x11 (routeType=FLOOD, payloadType=4) and pathByte 0x41/0x81
+       // (non-zero hop count) so packets aren't skipped by direct zero-hop filter.
+       raws := []string{"11" + "41" + "aabb", "11" + "81" + "aabb", "11" + "41" + "aabb"}
+       for i, raw := range raws {
+           tx := &StoreTx{
+               ID:          7000 + ni*10 + i,
+               RawHex:      raw,
+               Hash:        "incon-" + n.role + strconv.Itoa(i),
+               FirstSeen:   now,
+               PayloadType: &payloadType,
+               DecodedJSON: decoded,
+           }
+           store.packets = append(store.packets, tx)
+           store.byPayloadType[4] = append(store.byPayloadType[4], tx)
+       }
+   }
+
+   cfg := &Config{Port: 3000}
+   hub := NewHub()
+   srv := NewServer(db, cfg, hub)
+   srv.store = store
+   router := mux.NewRouter()
+   srv.RegisterRoutes(router)
+
+   req := httptest.NewRequest("GET", "/api/analytics/hash-collisions", nil)
+   w := httptest.NewRecorder()
+   router.ServeHTTP(w, req)
+
+   if w.Code != 200 {
+       t.Fatalf("expected 200, got %d", w.Code)
+   }
+   var body map[string]interface{}
+   json.Unmarshal(w.Body.Bytes(), &body)
+
+   incon := body["inconsistent_nodes"].([]interface{})
+   for _, item := range incon {
+       node := item.(map[string]interface{})
+       role := node["role"].(string)
+       if role == "companion" {
+           t.Error("companion node should be excluded from inconsistent_nodes")
+       }
+   }
+
+   // Repeater and room_server should be present
+   roles := make(map[string]bool)
+   for _, item := range incon {
+       node := item.(map[string]interface{})
+       roles[node["role"].(string)] = true
+   }
+   if !roles["repeater"] {
+       t.Error("expected repeater in inconsistent_nodes")
+   }
+   if !roles["room_server"] {
+       t.Error("expected room_server in inconsistent_nodes")
+   }
+}
+
+func TestHashSizeInfoTimeWindow(t *testing.T) {
+   // Issue #566: adverts older than 7 days should be excluded from hash size computation.
+   db := setupTestDB(t)
+   seedTestData(t, db)
+   store := NewPacketStore(db, nil)
+   if err := store.Load(); err != nil {
+       t.Fatalf("store.Load failed: %v", err)
+   }
+
+   pk := "dd44444444444444444444444444444444444444444444444444444444444444"
+   db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'OldNode', 'repeater')", pk)
+
+   decoded := `{"name":"OldNode","pubKey":"` + pk + `"}`
+   payloadType := 4
+
+   // Old adverts (>7 days ago) with flip-flop pattern
+   // Use header 0x11 (routeType=FLOOD) and pathByte 0x41/0x81 (non-zero hop count)
+   // so packets aren't skipped by direct zero-hop filter.
+   oldTime := time.Now().UTC().Add(-10 * 24 * time.Hour).Format("2006-01-02T15:04:05.000Z")
+   oldRaws := []string{"11" + "41" + "aabb", "11" + "81" + "aabb", "11" + "41" + "aabb"}
+   for i, raw := range oldRaws {
+       tx := &StoreTx{
+           ID:          6000 + i,
+           RawHex:      raw,
+           Hash:        "old-" + strconv.Itoa(i),
+           FirstSeen:   oldTime,
+           PayloadType: &payloadType,
+           DecodedJSON: decoded,
+       }
+       store.packets = append(store.packets, tx)
+       store.byPayloadType[4] = append(store.byPayloadType[4], tx)
+   }
+
+   info := store.GetNodeHashSizeInfo()
+   ni := info[pk]
+   if ni != nil && ni.Inconsistent {
+       t.Error("old adverts (>7 days) should be excluded; node should not be flagged as inconsistent")
+   }
+
+   // Now add recent adverts with consistent hash size — should appear in info
+   pk2 := "ee55555555555555555555555555555555555555555555555555555555555555"
+   db.conn.Exec("INSERT OR IGNORE INTO nodes (public_key, name, role) VALUES (?, 'NewNode', 'repeater')", pk2)
+   decoded2 := `{"name":"NewNode","pubKey":"` + pk2 + `"}`
+   recentTime := time.Now().UTC().Format("2006-01-02T15:04:05.000Z")
+   for i := 0; i < 3; i++ {
+       tx := &StoreTx{
+           ID:          6100 + i,
+           RawHex:      "11" + "41" + "aabb",
+           Hash:        "new-" + strconv.Itoa(i),
+           FirstSeen:   recentTime,
+           PayloadType: &payloadType,
+           DecodedJSON: decoded2,
+       }
+       store.packets = append(store.packets, tx)
+       store.byPayloadType[4] = append(store.byPayloadType[4], tx)
+   }
+
+   // Invalidate cache before second call
+   store.hashSizeInfoMu.Lock()
+   store.hashSizeInfoCache = nil
+   store.hashSizeInfoMu.Unlock()
+
+   info2 := store.GetNodeHashSizeInfo()
+   ni2 := info2[pk2]
+   if ni2 == nil {
+       t.Error("recent adverts should be included in hash size info")
+   }
+}
+
func TestObserverAnalyticsNoStore(t *testing.T) {
    _, router := setupNoStoreServer(t)
    req := httptest.NewRequest("GET", "/api/observers/obs1/analytics", nil)
@@ -3059,11 +3275,11 @@ func TestHashCollisionsWithCollision(t *testing.T) {
    now := time.Now().UTC()
    recent := now.Add(-1 * time.Hour).Format(time.RFC3339)

-   // Two nodes with same first byte 'CC', no adverts so hash_size=0 (included in all buckets)
+   // Two repeater nodes with same first byte 'CC' and hash_size=1
    db.conn.Exec(`INSERT INTO nodes (public_key, name, role, lat, lon, last_seen, first_seen, advert_count)
-       VALUES ('CC11223344556677', 'Node1', 'repeater', 37.5, -122.0, ?, '2026-01-01T00:00:00Z', 0)`, recent)
+       VALUES ('CC11223344556677', 'Node1', 'repeater', 37.5, -122.0, ?, '2026-01-01T00:00:00Z', 5)`, recent)
    db.conn.Exec(`INSERT INTO nodes (public_key, name, role, lat, lon, last_seen, first_seen, advert_count)
-       VALUES ('CC99887766554433', 'Node2', 'repeater', 37.51, -122.01, ?, '2026-01-01T00:00:00Z', 0)`, recent)
+       VALUES ('CC99887766554433', 'Node2', 'repeater', 37.51, -122.01, ?, '2026-01-01T00:00:00Z', 5)`, recent)

    cfg := &Config{Port: 3000}
    hub := NewHub()
@@ -3072,6 +3288,14 @@ func TestHashCollisionsWithCollision(t *testing.T) {
    if err := store.Load(); err != nil {
        t.Fatalf("store.Load failed: %v", err)
    }
+   // Inject hash_size=1 for both nodes so they appear in the 1-byte bucket
+   store.hashSizeInfoMu.Lock()
+   store.hashSizeInfoCache = map[string]*hashSizeNodeInfo{
+       "CC11223344556677": {HashSize: 1, AllSizes: map[int]bool{1: true}},
+       "CC99887766554433": {HashSize: 1, AllSizes: map[int]bool{1: true}},
+   }
+   store.hashSizeInfoAt = time.Now()
+   store.hashSizeInfoMu.Unlock()
    srv.store = store
    router := mux.NewRouter()
    srv.RegisterRoutes(router)
@@ -3186,3 +3410,214 @@ func TestHashCollisionsMissingCoordinates(t *testing.T) {
    }
}

+// TestHashCollisionsOnlyRepeaters verifies that only repeater nodes
+// are included in collision analysis. Companions, rooms, sensors, and
+// hash_size==0 nodes are excluded — per firmware analysis, only repeaters
+// forward packets and appear in path[] arrays. (#441)
+func TestHashCollisionsOnlyRepeaters(t *testing.T) {
+   db := setupTestDB(t)
+
+   // Insert nodes sharing the same 1-byte prefix "AA":
+   //  1. repeater with hash_size=1 → should be counted
+   //  2. repeater with hash_size=0 (unknown) → should be excluded
+   //  3. companion with hash_size=1 → should be excluded
+   //  4. room with hash_size=1 → should be excluded
+   //  5. sensor with hash_size=1 → should be excluded
+   now := time.Now().Format("2006-01-02 15:04:05")
+   db.conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen) VALUES
+       ('aa11223344556677', 'Repeater1', 'repeater', ?),
+       ('aa99887766554433', 'UnknownNode', 'repeater', ?),
+       ('aadeadbeefcafe01', 'Companion1', 'companion', ?),
+       ('aabbcc1122334455', 'Room1', 'room', ?),
+       ('aabbcc9988776655', 'Sensor1', 'sensor', ?)`, now, now, now, now, now)
+
+   // We also need a second repeater with hash_size=1 and same prefix to
+   // confirm that genuine collisions ARE still detected.
+   db.conn.Exec(`INSERT INTO nodes (public_key, name, role, last_seen) VALUES
+       ('aa00112233445566', 'Repeater2', 'repeater', ?)`, now)
+
+   cfg := &Config{Port: 3000}
+   hub := NewHub()
+   srv := NewServer(db, cfg, hub)
+   store := NewPacketStore(db, nil)
+   store.Load()
+   srv.store = store
+
+   // Inject hash size info directly into the cache
+   store.hashSizeInfoMu.Lock()
+   store.hashSizeInfoCache = map[string]*hashSizeNodeInfo{
+       "aa11223344556677": {HashSize: 1, AllSizes: map[int]bool{1: true}},
+       "aa00112233445566": {HashSize: 1, AllSizes: map[int]bool{1: true}},
+       "aa99887766554433": {HashSize: 0, AllSizes: map[int]bool{}},        // unknown
+       "aadeadbeefcafe01": {HashSize: 1, AllSizes: map[int]bool{1: true}}, // companion
+       "aabbcc1122334455": {HashSize: 1, AllSizes: map[int]bool{1: true}}, // room
+       "aabbcc9988776655": {HashSize: 1, AllSizes: map[int]bool{1: true}}, // sensor
+   }
+   store.hashSizeInfoAt = time.Now()
+   store.hashSizeInfoMu.Unlock()
+
+   result := store.computeHashCollisions("")
+
+   bySize, ok := result["by_size"].(map[string]interface{})
+   if !ok {
+       t.Fatal("missing by_size")
+   }
+
+   size1, ok := bySize["1"].(map[string]interface{})
+   if !ok {
+       t.Fatal("missing by_size[1]")
+   }
+
+   stats, ok := size1["stats"].(map[string]interface{})
+   if !ok {
+       t.Fatal("missing stats")
+   }
+
+   // Only Repeater1 and Repeater2 should be in nodesForByte (hash_size=1, role=repeater).
+   // UnknownNode (hash_size=0), Companion1, Room1, Sensor1 must all be excluded.
+   nodesForByte := stats["nodes_for_byte"]
+   if nodesForByte != 2 {
+       t.Errorf("expected nodes_for_byte=2 (only repeaters with hash_size=1), got %v", nodesForByte)
+   }
+
+   // They share prefix "AA", so there should be exactly 1 collision entry.
+   collisions, ok := size1["collisions"].([]collisionEntry)
+   if !ok {
+       t.Fatalf("collisions is not []collisionEntry")
+   }
+   if len(collisions) != 1 {
+       t.Errorf("expected 1 collision entry, got %d", len(collisions))
+   }
+   if len(collisions) == 1 && len(collisions[0].Nodes) != 2 {
+       t.Errorf("expected 2 nodes in collision, got %d", len(collisions[0].Nodes))
+   }
+}
+
+func TestNodePathsEndpointUsesIndex(t *testing.T) {
+   srv, router := setupTestServer(t)
+
+   // Verify byPathHop index was built during Load
+   srv.store.mu.RLock()
+   hopKeys := len(srv.store.byPathHop)
+   srv.store.mu.RUnlock()
+   if hopKeys == 0 {
+       t.Fatal("byPathHop index is empty after Load")
+   }
+
+   // Query paths for TestRepeater (pubkey aabbccdd11223344, prefix "aa")
+   // Should find transmissions with hop "aa" in path
+   req := httptest.NewRequest("GET", "/api/nodes/aabbccdd11223344/paths", nil)
+   w := httptest.NewRecorder()
+   router.ServeHTTP(w, req)
+
+   if w.Code != 200 {
+       t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
+   }
+
+   var resp struct {
+       Paths              []json.RawMessage `json:"paths"`
+       TotalTransmissions int               `json:"totalTransmissions"`
+   }
+   if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
+       t.Fatalf("bad JSON: %v", err)
+   }
+
+   // Transmission 1 has path ["aa","bb"] which contains "aa" matching prefix of aabbccdd11223344
+   if resp.TotalTransmissions == 0 {
+       t.Error("expected at least 1 transmission matching node paths")
+   }
+   if len(resp.Paths) == 0 {
+       t.Error("expected at least 1 path group")
+   }
+}
+
+func TestPathHopIndexIncrementalUpdate(t *testing.T) {
+   // Test that addTxToPathHopIndex and removeTxFromPathHopIndex work correctly
+   idx := make(map[string][]*StoreTx)
+
+   pk1 := "fullpubkey1"
+   tx1 := &StoreTx{
+       ID:           1,
+       PathJSON:     `["ab","cd"]`,
+       ResolvedPath: []*string{&pk1, nil},
+   }
+
+   addTxToPathHopIndex(idx, tx1)
+
+   // Should be indexed under "ab", "cd", and "fullpubkey1"
+   if len(idx["ab"]) != 1 {
+       t.Errorf("expected 1 entry for 'ab', got %d", len(idx["ab"]))
+   }
+   if len(idx["cd"]) != 1 {
+       t.Errorf("expected 1 entry for 'cd', got %d", len(idx["cd"]))
+   }
+   if len(idx["fullpubkey1"]) != 1 {
+       t.Errorf("expected 1 entry for resolved pubkey, got %d", len(idx["fullpubkey1"]))
+   }
+
+   // Add another tx with overlapping hop
+   tx2 := &StoreTx{
+       ID:       2,
+       PathJSON: `["ab","ef"]`,
+   }
+   addTxToPathHopIndex(idx, tx2)
+
+   if len(idx["ab"]) != 2 {
+       t.Errorf("expected 2 entries for 'ab', got %d", len(idx["ab"]))
+   }
+   if len(idx["ef"]) != 1 {
+       t.Errorf("expected 1 entry for 'ef', got %d", len(idx["ef"]))
+   }
+
+   // Remove tx1
+   removeTxFromPathHopIndex(idx, tx1)
+
+   if len(idx["ab"]) != 1 {
+       t.Errorf("expected 1 entry for 'ab' after removal, got %d", len(idx["ab"]))
+   }
+   if _, ok := idx["cd"]; ok {
+       t.Error("expected 'cd' key to be deleted after removal")
+   }
+   if _, ok := idx["fullpubkey1"]; ok {
+       t.Error("expected resolved pubkey key to be deleted after removal")
+   }
+}
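The index functions themselves are not shown in this diff. One plausible shape for addTxToPathHopIndex, consistent with the behavior the test above pins down, is sketched below; this is an assumption, and the real implementation may differ (the Sketch suffix marks it as hypothetical):

// Sketch under assumptions: index a transmission under its raw path hops
// (from path_json) and under any fully resolved hop pubkeys.
func addTxToPathHopIndexSketch(idx map[string][]*StoreTx, tx *StoreTx) {
    for _, hop := range txGetParsedPath(tx) { // raw 2-char hops
        key := strings.ToLower(hop)
        idx[key] = append(idx[key], tx)
    }
    for _, rp := range tx.ResolvedPath { // full pubkeys; nil = unresolved hop
        if rp != nil {
            key := strings.ToLower(*rp)
            idx[key] = append(idx[key], tx)
        }
    }
}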
func TestMetricsAPIEndpoints(t *testing.T) {
|
||||
srv, router := setupTestServer(t)
|
||||
|
||||
now := time.Now().UTC()
|
||||
t1 := now.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
|
||||
srv.db.conn.Exec("INSERT INTO observer_metrics (observer_id, timestamp, noise_floor) VALUES (?, ?, ?)",
|
||||
"obs1", t1, -112.0)
|
||||
|
||||
// Test /api/observers/obs1/metrics
|
||||
req := httptest.NewRequest("GET", "/api/observers/obs1/metrics", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
if w.Code != 200 {
|
||||
t.Fatalf("GET /api/observers/obs1/metrics = %d, want 200", w.Code)
|
||||
}
|
||||
var resp map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
metrics, ok := resp["metrics"].([]interface{})
|
||||
if !ok || len(metrics) != 1 {
|
||||
t.Errorf("expected 1 metric in response, got %v", resp["metrics"])
|
||||
}
|
||||
|
||||
// Test /api/observers/metrics/summary
|
||||
req2 := httptest.NewRequest("GET", "/api/observers/metrics/summary?window=24h", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
router.ServeHTTP(w2, req2)
|
||||
if w2.Code != 200 {
|
||||
t.Fatalf("GET /api/observers/metrics/summary = %d, want 200", w2.Code)
|
||||
}
|
||||
var resp2 map[string]interface{}
|
||||
json.Unmarshal(w2.Body.Bytes(), &resp2)
|
||||
observers, ok := resp2["observers"].([]interface{})
|
||||
if !ok || len(observers) != 1 {
|
||||
t.Errorf("expected 1 observer in summary, got %v", resp2["observers"])
|
||||
}
|
||||
}
|
||||
|
||||
+1063
-427
File diff suppressed because it is too large
@@ -240,6 +240,7 @@ type TransmissionResp struct {
	SNR          interface{}       `json:"snr"`
	RSSI         interface{}       `json:"rssi"`
	PathJSON     interface{}       `json:"path_json"`
	ResolvedPath []*string         `json:"resolved_path,omitempty"`
	Direction    interface{}       `json:"direction"`
	Score        interface{}       `json:"score,omitempty"`
	Observations []ObservationResp `json:"observations,omitempty"`
@@ -254,6 +255,7 @@ type ObservationResp struct {
	SNR          interface{} `json:"snr"`
	RSSI         interface{} `json:"rssi"`
	PathJSON     interface{} `json:"path_json"`
	ResolvedPath []*string   `json:"resolved_path,omitempty"`
	Timestamp    interface{} `json:"timestamp"`
}
@@ -0,0 +1 @@
corescope-tui
@@ -0,0 +1,30 @@
module github.com/corescope/tui

go 1.22

require (
	github.com/charmbracelet/bubbletea v1.3.4
	github.com/charmbracelet/lipgloss v1.1.0
	github.com/gorilla/websocket v1.5.3
)

require (
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/x/ansi v0.8.0 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-localereader v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
	github.com/muesli/cancelreader v0.2.2 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	golang.org/x/sync v0.11.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	golang.org/x/text v0.3.8 // indirect
)
@@ -0,0 +1,47 @@
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+696
@@ -0,0 +1,696 @@
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strings"
	"sync"
	"time"

	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
	"github.com/gorilla/websocket"
)

// --- Data types ---

type ObserverSummary struct {
	ObserverID   string   `json:"id"`
	ObserverName *string  `json:"name"`
	NoiseFloor   *float64 `json:"noise_floor"`
	BatteryMv    *int     `json:"battery_mv"`
	PacketCount  int      `json:"packet_count"`
	LastSeen     string   `json:"last_seen"`
}

type Packet struct {
	Timestamp    string
	Type         string
	ObserverName string
	Hops         string
	RSSI         string
	SNR          string
	ChannelText  string
}
// --- Messages ---

type summaryMsg []ObserverSummary
type summaryErrMsg struct{ err error }
type packetMsg Packet
type wsStatusMsg string
type tickMsg time.Time
type renderTickMsg time.Time

// --- Model ---

type view int

const (
	viewDashboard view = iota
	viewLiveFeed
)

// ringBufferMax is the maximum number of packets kept in the live feed.
const ringBufferMax = 500

type model struct {
	baseURL     string
	currentView view
	width       int
	height      int

	// Dashboard
	observers   []ObserverSummary
	lastRefresh time.Time
	fetchErr    error

	// Live feed — ring buffer with head/tail indices, no allocations in steady state.
	ringBuf  [ringBufferMax]Packet
	ringHead int  // index of oldest element
	ringLen  int  // number of elements in the buffer
	dirty    bool // true when new data arrived since last render tick
	// wsMsgChan multiplexes packets and status updates from the WS goroutine
	// into the bubbletea event loop.
	wsMsgChan chan tea.Msg
	wsStatus  string
	wsDone    chan struct{}
	// Pointer, because bubbletea copies the model by value on every Update
	// and a sync.Once must not be copied (it embeds a sync.Mutex).
	wsCloseOnce *sync.Once
}

func initialModel(baseURL string) model {
	return model{
		baseURL:     strings.TrimRight(baseURL, "/"),
		wsStatus:    "disconnected",
		wsMsgChan:   make(chan tea.Msg, 100),
		wsDone:      make(chan struct{}),
		wsCloseOnce: &sync.Once{},
	}
}
// --- Styles ---

var (
	titleStyle  = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("69"))
	greenStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("42"))
	yellowStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("226"))
	redStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("196"))
	dimStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
	statusStyle = lipgloss.NewStyle().Background(lipgloss.Color("236")).Foreground(lipgloss.Color("252")).Padding(0, 1)
	tabActive   = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("69")).Underline(true)
	tabInactive = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
	headerStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("252"))
)

// --- Commands ---

func fetchSummary(baseURL string) tea.Cmd {
	return func() tea.Msg {
		client := &http.Client{Timeout: 10 * time.Second}
		resp, err := client.Get(baseURL + "/api/observers")
		if err != nil {
			return summaryErrMsg{err}
		}
		defer resp.Body.Close()
		body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
		if err != nil {
			return summaryErrMsg{err}
		}
		// The API returns {"observers": [...]}
		var wrapper struct {
			Observers []ObserverSummary `json:"observers"`
		}
		if err := json.Unmarshal(body, &wrapper); err != nil {
			return summaryErrMsg{fmt.Errorf("json: %w (body: %.100s)", err, string(body))}
		}
		return summaryMsg(wrapper.Observers)
	}
}
func tickEvery(d time.Duration) tea.Cmd {
	return tea.Tick(d, func(t time.Time) tea.Msg {
		return tickMsg(t)
	})
}

// renderTick fires every 16ms (~60fps) to coalesce packet renders.
func renderTick() tea.Cmd {
	return tea.Tick(16*time.Millisecond, func(t time.Time) tea.Msg {
		return renderTickMsg(t)
	})
}

// listenForWSMsg waits for the next message from the WebSocket goroutine and
// delivers it into the bubbletea event loop. Returns nil when the channel is
// closed (program shutting down).
func listenForWSMsg(ch <-chan tea.Msg) tea.Cmd {
	return func() tea.Msg {
		msg, ok := <-ch
		if !ok {
			return nil
		}
		return msg
	}
}
// --- WebSocket goroutine ---

// connectWS manages the WebSocket connection with exponential backoff reconnect.
// It sends packetMsg and wsStatusMsg on msgChan. It returns when done is closed.
func connectWS(baseURL string, msgChan chan<- tea.Msg, done <-chan struct{}) {
	defer func() {
		if r := recover(); r != nil {
			select {
			case msgChan <- wsStatusMsg(fmt.Sprintf("panic: %v", r)):
			default:
			}
		}
	}()

	u, err := url.Parse(baseURL)
	if err != nil {
		select {
		case msgChan <- wsStatusMsg("invalid url"):
		case <-done:
		}
		return
	}
	scheme := "ws"
	if u.Scheme == "https" {
		scheme = "wss"
	}
	wsURL := scheme + "://" + u.Host + "/ws"

	backoff := time.Second
	maxBackoff := 30 * time.Second

	for {
		select {
		case <-done:
			return
		default:
		}

		sendStatus(msgChan, done, "connecting...")
		conn, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
		if err != nil {
			sendStatus(msgChan, done, fmt.Sprintf("error: %v", err))
			select {
			case <-done:
				return
			case <-time.After(backoff):
			}
			backoff = time.Duration(math.Min(float64(backoff)*2, float64(maxBackoff)))
			continue
		}

		sendStatus(msgChan, done, "connected")
		backoff = time.Second

		// Read loop: reads messages until error or done.
		// Ping/pong keepalive detects dead connections faster than relying on
		// the read deadline alone. We send pings every 30s; the pong handler
		// resets the read deadline to 60s. If no pong arrives, ReadMessage
		// times out.
		func() {
			defer conn.Close()

			conn.SetReadDeadline(time.Now().Add(60 * time.Second))
			conn.SetPongHandler(func(string) error {
				conn.SetReadDeadline(time.Now().Add(60 * time.Second))
				return nil
			})

			// Periodic ping goroutine
			pingDone := make(chan struct{})
			defer close(pingDone)
			go func() {
				ticker := time.NewTicker(30 * time.Second)
				defer ticker.Stop()
				for {
					select {
					case <-ticker.C:
						if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
							return
						}
					case <-pingDone:
						return
					case <-done:
						return
					}
				}
			}()

			for {
				select {
				case <-done:
					// Send a graceful close frame before returning.
					_ = conn.WriteMessage(
						websocket.CloseMessage,
						websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""),
					)
					return
				default:
				}

				// ReadMessage blocks until data arrives or the 60s read deadline
				// expires. The pong handler resets the deadline on each pong, so
				// a timeout means no data AND no pong arrived: the connection is
				// dead.
				_, message, err := conn.ReadMessage()
				if err != nil {
					if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
						sendStatus(msgChan, done, "disconnected")
						return
					}
					if netErr, ok := err.(*websocket.CloseError); ok {
						sendStatus(msgChan, done, fmt.Sprintf("closed: %d", netErr.Code))
						return
					}
					// gorilla/websocket read errors are permanent; after a
					// deadline timeout the connection cannot be reused, so
					// return and let the outer loop reconnect.
					if isTimeoutError(err) {
						sendStatus(msgChan, done, "timeout, reconnecting")
						return
					}
					sendStatus(msgChan, done, "disconnected")
					return
				}

				pkt := parseWSMessage(message)
				if pkt != nil {
					select {
					case msgChan <- packetMsg(*pkt):
					case <-done:
						return
					}
				}
			}
		}()
	}
}
// sendStatus sends a wsStatusMsg, respecting cancellation.
func sendStatus(msgChan chan<- tea.Msg, done <-chan struct{}, status string) {
	select {
	case msgChan <- wsStatusMsg(status):
	case <-done:
	}
}

// isTimeoutError checks if an error is a network timeout (read deadline exceeded).
func isTimeoutError(err error) bool {
	// net.Error has a Timeout() method.
	type timeout interface {
		Timeout() bool
	}
	if t, ok := err.(timeout); ok {
		return t.Timeout()
	}
	return false
}
// parseWSMessage parses a WebSocket broadcast frame.
// The server sends: {"type":"packet","data":{...}} where data contains
// top-level fields (observer_name, rssi, snr, timestamp, ...) plus
// nested "decoded" (with header.payloadTypeName, payload) and "packet".
func parseWSMessage(data []byte) *Packet {
	var envelope map[string]interface{}
	if err := json.Unmarshal(data, &envelope); err != nil {
		return nil
	}

	// Unwrap the {"type":"packet","data":{...}} envelope
	if t, _ := envelope["type"].(string); t != "packet" {
		return nil // ignore non-packet messages (e.g. "status")
	}
	msg, ok := envelope["data"].(map[string]interface{})
	if !ok {
		return nil
	}

	pkt := &Packet{}

	// Timestamp — prefer the top-level field; fall back to the raw prefix,
	// then to the current time
	if ts, ok := msg["timestamp"].(string); ok {
		if t, err := time.Parse(time.RFC3339, ts); err == nil {
			pkt.Timestamp = t.Format("15:04:05")
		} else if len(ts) >= 8 {
			pkt.Timestamp = ts[:8]
		} else {
			pkt.Timestamp = ts
		}
	}
	if pkt.Timestamp == "" {
		pkt.Timestamp = time.Now().Format("15:04:05")
	}

	// Type — from decoded.header.payloadTypeName (matches live.js)
	if decoded, ok := msg["decoded"].(map[string]interface{}); ok {
		if header, ok := decoded["header"].(map[string]interface{}); ok {
			if t, ok := header["payloadTypeName"].(string); ok {
				pkt.Type = t
			}
		}
	}
	if pkt.Type == "" {
		pkt.Type = "UNKNOWN"
	}

	// Observer name
	if name, ok := msg["observer_name"].(string); ok {
		pkt.ObserverName = name
	} else if id, ok := msg["observer_id"].(string); ok {
		pkt.ObserverName = safePrefix(id, 8)
	}

	// Hops — from decoded.payload.hops
	if decoded, ok := msg["decoded"].(map[string]interface{}); ok {
		if payload, ok := decoded["payload"].(map[string]interface{}); ok {
			if hops, ok := payload["hops"].(float64); ok {
				pkt.Hops = fmt.Sprintf("%d", int(hops))
			}
		}
	}

	// RSSI / SNR — top-level fields
	if rssi, ok := msg["rssi"].(float64); ok {
		pkt.RSSI = fmt.Sprintf("%.0f", rssi)
	}
	if snr, ok := msg["snr"].(float64); ok {
		pkt.SNR = fmt.Sprintf("%.1f", snr)
	}

	// Channel text — from decoded.payload
	if decoded, ok := msg["decoded"].(map[string]interface{}); ok {
		if payload, ok := decoded["payload"].(map[string]interface{}); ok {
			ch := ""
			if name, ok := payload["channel_name"].(string); ok {
				ch = "#" + name
			}
			if text, ok := payload["text"].(string); ok {
				if ch != "" {
					pkt.ChannelText = ch + " " + truncate(text, 40)
				} else {
					pkt.ChannelText = truncate(text, 40)
				}
			}
		}
	}

	return pkt
}

func truncate(s string, n int) string {
	runes := []rune(s)
	if len(runes) <= n {
		return s
	}
	return string(runes[:n-1]) + "…"
}

// safePrefix returns the first n characters of s (rune-aware), or s if shorter.
func safePrefix(s string, n int) string {
	runes := []rune(s)
	if len(runes) <= n {
		return s
	}
	return string(runes[:n])
}
// --- Init / Update / View ---

func (m model) Init() tea.Cmd {
	go connectWS(m.baseURL, m.wsMsgChan, m.wsDone)

	return tea.Batch(
		fetchSummary(m.baseURL),
		tickEvery(5*time.Second),
		listenForWSMsg(m.wsMsgChan),
		renderTick(),
	)
}

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c":
			m.wsCloseOnce.Do(func() { close(m.wsDone) })
			return m, tea.Quit
		case "tab":
			if m.currentView == viewDashboard {
				m.currentView = viewLiveFeed
			} else {
				m.currentView = viewDashboard
			}
		case "1":
			m.currentView = viewDashboard
		case "2":
			m.currentView = viewLiveFeed
		}

	case tea.WindowSizeMsg:
		m.width = msg.Width
		m.height = msg.Height

	case summaryMsg:
		m.observers = []ObserverSummary(msg)
		// Pre-sort by worst noise floor (highest = worst) so View doesn't sort on every render.
		sort.Slice(m.observers, func(i, j int) bool {
			return nfVal(m.observers[i].NoiseFloor) > nfVal(m.observers[j].NoiseFloor)
		})
		m.lastRefresh = time.Now()
		m.fetchErr = nil

	case summaryErrMsg:
		m.fetchErr = msg.err

	case tickMsg:
		return m, tea.Batch(
			fetchSummary(m.baseURL),
			tickEvery(5*time.Second),
			listenForWSMsg(m.wsMsgChan),
		)

	case wsStatusMsg:
		m.wsStatus = string(msg)
		return m, listenForWSMsg(m.wsMsgChan)

	case packetMsg:
		p := Packet(msg)
		// Ring buffer: write at (head+len) % cap, no allocations.
		if m.ringLen < ringBufferMax {
			m.ringBuf[(m.ringHead+m.ringLen)%ringBufferMax] = p
			m.ringLen++
		} else {
			// Overwrite oldest, advance head.
			m.ringBuf[m.ringHead] = p
			m.ringHead = (m.ringHead + 1) % ringBufferMax
		}
		m.dirty = true
		return m, listenForWSMsg(m.wsMsgChan)

	case renderTickMsg:
		// 60fps render coalescing: bubbletea re-renders when Update returns.
		// By ticking at 16ms, we batch all packets that arrived between ticks
		// into a single View() call instead of re-rendering per packet.
		if m.dirty {
			m.dirty = false
		}
		return m, renderTick()
	}

	// Always keep the WS listener running, even for unhandled messages.
	return m, listenForWSMsg(m.wsMsgChan)
}
func (m model) View() string {
	var b strings.Builder

	// Title
	b.WriteString(titleStyle.Render("🍄 CoreScope TUI"))
	b.WriteString("\n")

	// Tabs
	dash := tabInactive.Render("[1:Dashboard]")
	live := tabInactive.Render("[2:Live Feed]")
	if m.currentView == viewDashboard {
		dash = tabActive.Render("[1:Dashboard]")
	} else {
		live = tabActive.Render("[2:Live Feed]")
	}
	b.WriteString(dash + " " + live + "\n\n")

	// Content
	switch m.currentView {
	case viewDashboard:
		b.WriteString(m.viewDashboard())
	case viewLiveFeed:
		b.WriteString(m.viewLiveFeed())
	}

	// Status bar
	b.WriteString("\n")
	wsIcon := "●"
	wsColor := redStyle
	if m.wsStatus == "connected" {
		wsColor = greenStyle
	} else if m.wsStatus == "connecting..." {
		wsColor = yellowStyle
	}
	status := fmt.Sprintf(" WS: %s %s │ View: %s │ %s │ q:quit Tab:switch",
		wsColor.Render(wsIcon), m.wsStatus,
		viewName(m.currentView),
		m.baseURL,
	)
	b.WriteString(statusStyle.Render(status))

	return b.String()
}

func viewName(v view) string {
	if v == viewDashboard {
		return "Dashboard"
	}
	return "Live Feed"
}
func (m model) viewDashboard() string {
	var b strings.Builder

	if m.fetchErr != nil {
		b.WriteString(redStyle.Render(fmt.Sprintf("Error: %v", m.fetchErr)))
		b.WriteString("\n\n")
	}

	refreshStr := ""
	if !m.lastRefresh.IsZero() {
		refreshStr = m.lastRefresh.Format("15:04:05")
	}
	b.WriteString(fmt.Sprintf("Observers: %d │ Last refresh: %s\n\n",
		len(m.observers), refreshStr))

	// Header
	b.WriteString(headerStyle.Render(fmt.Sprintf("%-24s %8s %10s %8s %10s",
		"Observer", "NF(dBm)", "Battery", "Packets", "Last Seen")))
	b.WriteString("\n")
	b.WriteString(dimStyle.Render(strings.Repeat("─", 68)))
	b.WriteString("\n")

	for _, o := range m.observers {
		name := safePrefix(o.ObserverID, 8)
		if o.ObserverName != nil && *o.ObserverName != "" {
			name = truncate(*o.ObserverName, 24)
		}

		nf := fmtNF(o.NoiseFloor)
		batt := "—"
		if o.BatteryMv != nil {
			batt = fmt.Sprintf("%dmV", *o.BatteryMv)
		}
		lastSeen := "—"
		if o.LastSeen != "" {
			if t, err := time.Parse(time.RFC3339, o.LastSeen); err == nil {
				lastSeen = time.Since(t).Truncate(time.Second).String() + " ago"
				if time.Since(t) < time.Minute {
					lastSeen = "just now"
				}
			}
		}

		// Color code NF
		nfStyle := greenStyle
		if o.NoiseFloor != nil {
			if *o.NoiseFloor > -85 {
				nfStyle = redStyle
			} else if *o.NoiseFloor > -100 {
				nfStyle = yellowStyle
			}
		}

		line := fmt.Sprintf("%-24s %8s %10s %8d %10s",
			name, nfStyle.Render(nf), batt, o.PacketCount, lastSeen)
		b.WriteString(line + "\n")
	}

	return b.String()
}

func nfVal(nf *float64) float64 {
	if nf == nil {
		return -999
	}
	return *nf
}

func fmtNF(nf *float64) string {
	if nf == nil {
		return "—"
	}
	return fmt.Sprintf("%.1f", *nf)
}
func (m model) viewLiveFeed() string {
	var b strings.Builder

	b.WriteString(fmt.Sprintf("Packets: %d/%d │ WS: %s\n\n", m.ringLen, ringBufferMax, m.wsStatus))

	b.WriteString(headerStyle.Render(fmt.Sprintf("%-10s %-10s %-20s %5s %6s %6s %s",
		"Time", "Type", "Observer", "Hops", "RSSI", "SNR", "Channel/Text")))
	b.WriteString("\n")
	b.WriteString(dimStyle.Render(strings.Repeat("─", 85)))
	b.WriteString("\n")

	// Show last N packets that fit the screen
	maxLines := 20
	if m.height > 10 {
		maxLines = m.height - 10
	}
	// Calculate visible range from the ring buffer (most recent packets).
	visible := m.ringLen
	if visible > maxLines {
		visible = maxLines
	}
	startIdx := m.ringLen - visible // offset from oldest

	for i := 0; i < visible; i++ {
		p := m.ringBuf[(m.ringHead+startIdx+i)%ringBufferMax]
		typeStyle := dimStyle
		switch p.Type {
		case "ADVERT":
			typeStyle = greenStyle
		case "GRP_TXT", "TXT_MSG":
			typeStyle = yellowStyle
		case "REQ":
			typeStyle = redStyle
		}

		line := fmt.Sprintf("%-10s %s %-20s %5s %6s %6s %s",
			dimStyle.Render(p.Timestamp),
			typeStyle.Render(fmt.Sprintf("%-10s", p.Type)),
			truncate(p.ObserverName, 20),
			p.Hops, p.RSSI, p.SNR,
			dimStyle.Render(p.ChannelText),
		)
		b.WriteString(line + "\n")
	}

	return b.String()
}
// --- Main ---

func main() {
	urlFlag := flag.String("url", "http://localhost:3000", "CoreScope server URL")
	flag.Parse()

	m := initialModel(*urlFlag)
	p := tea.NewProgram(m, tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}
@@ -0,0 +1,403 @@
# Security Analysis: MeshCore Channel Encryption

## Scope

This analysis covers MeshCore's encryption vulnerabilities in order of practical severity. Section 1 addresses PSK brute-force (the highest-priority practical threat). Sections 2–9 cover AES-128-ECB structural weaknesses. Section 8 covers TXT_MSG. All claims are derived from firmware source (`BaseChatMesh.cpp`, `Utils.cpp`, `Mesh.cpp`, `MeshCore.h`) unless explicitly marked as conjecture.

## 1. PSK Brute-Force with Timestamp Oracle

### 1.1 The No-KDF Design

MeshCore channel PSKs are base64-decoded directly into AES-128 keys with no key derivation function (from `BaseChatMesh::addChannel()`):

```cpp
int len = decode_base64((unsigned char *) psk_base64, strlen(psk_base64), dest->channel.secret);
```

No PBKDF2, scrypt, argon2, or HKDF is applied. The base64-decoded bytes ARE the AES key. This means:

1. **Human-memorable passphrases have drastically reduced entropy.** If a user types "SecretChannel" as their PSK, the base64-decoded output is ~10 bytes of ASCII-range values. The key space is determined by the passphrase complexity, not by AES-128's theoretical 2^128 key space.

2. **Short passphrases produce short keys.** `decode_base64` maps every 4 base64 characters to 3 bytes. A passphrase shorter than ~22 base64 characters produces fewer than 16 bytes, and the remainder of the 16-byte key buffer depends on whatever was previously in memory (likely zeros from initialization). An 8-character passphrase decodes to only 6 bytes — the effective key space may be as low as 2^48.

3. **No salt.** Identical passphrases on different meshes produce identical keys. A single precomputed dictionary attack works globally against all MeshCore deployments.

### 1.2 Timestamp as Known-Plaintext Oracle

Every GRP_TXT plaintext begins with a structured, largely predictable header:

```
Block 0: [TS₀][TS₁][TS₂][TS₃][0x00][sender_name][: ][message_start...]
```

An attacker who captures a single packet can verify a candidate PSK by:
1. Decrypting block 0 with the candidate key
2. Checking if bytes 0–3 produce a plausible Unix timestamp (within a reasonable window of the capture time)
3. Checking if byte 4 is 0x00 (TXT_TYPE_PLAIN)
4. Optionally checking if bytes 5+ are valid ASCII (sender name)

The timestamp alone constrains the search: a ±1-hour window around capture time yields ~7,200 valid timestamps out of 2^32 possibilities — a false-positive rate of ~1.7×10^-6. Combined with the type byte and ASCII sender-name check, false positives are effectively zero. **One captured packet is sufficient for definitive key verification.**
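As a concrete sketch of this check, in Go for readability (illustrative only: the little-endian timestamp layout and the printable-ASCII test are assumptions stated above, and `ct0` is the first 16-byte ciphertext block after the HMAC prefix):

```go
package oracle

import (
	"crypto/aes"
	"encoding/binary"
)

// plausibleKey tests one candidate 16-byte key against a captured block-0
// ciphertext. captureTime is the Unix time at which the packet was captured.
// Assumes the firmware's memcpy of the uint32 timestamp is little-endian.
func plausibleKey(key, ct0 []byte, captureTime int64) bool {
	if len(ct0) < 16 {
		return false
	}
	c, err := aes.NewCipher(key) // 16-byte key -> AES-128; one block = raw ECB
	if err != nil {
		return false
	}
	var pt [16]byte
	c.Decrypt(pt[:], ct0)

	// Check 1: bytes 0-3 must be a Unix timestamp within +/-1 hour of capture.
	ts := int64(binary.LittleEndian.Uint32(pt[0:4]))
	if ts < captureTime-3600 || ts > captureTime+3600 {
		return false
	}
	// Check 2: byte 4 must be TXT_TYPE_PLAIN (0x00).
	if pt[4] != 0x00 {
		return false
	}
	// Check 3 (optional): bytes 5+ should be printable ASCII (sender name,
	// ": ", message start). May reject non-ASCII message text.
	for _, b := range pt[5:] {
		if b < 0x20 || b > 0x7e {
			return false
		}
	}
	return true
}
```

The per-candidate cost is one AES key schedule plus one block decryption, which is what makes the table in §1.3 so cheap.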
### 1.3 Attack Cost Estimates

Hardware assumption: commodity GPU (e.g., RTX 4090) performing ~10 billion AES-128-ECB block encryptions per second. This is conservative — optimized implementations achieve higher throughput.

| Passphrase style | Search space | Time at 10^10 AES/sec |
|---|---|---|
| Single common English word (10K-word list) | ~10^4 | microseconds |
| Single English word (170K full dictionary) | ~1.7×10^5 | microseconds |
| Two concatenated common words | ~10^8 | ~10 milliseconds |
| Three concatenated common words | ~10^12 | ~100 seconds (~2 min) |
| Four random common words (Diceware-style) | ~10^16 | ~10^6 seconds (~12 days) |
| Random 8-char alphanumeric (62^8) | ~2.2×10^14 | ~22,000 seconds (~6 hours) |
| Random 12-char alphanumeric (62^12) | ~3.2×10^21 | ~10^11 seconds (infeasible) |
| Full random 16-byte key (2^128) | ~3.4×10^38 | infeasible |

**Important caveats on search space:**
- Dictionary sizes vary: "common English words" ≈ 3K–10K; full dictionary ≈ 170K. Estimates above use 10K for "common" lists.
- Humans do not choose words uniformly. Zipf's law applies — a small fraction of words account for most selections. The effective entropy is **lower** than the uniform assumption, making attacks faster.
- Concatenation without separators creates ambiguity ("therapist" = "therapist" or "the"+"rapist"), but this marginally reduces the search space rather than increasing it.
- Multi-channel amortization: an attacker can test each candidate against ALL captured channels simultaneously, paying the AES cost once per candidate.

### 1.4 Attack Properties

- **Offline attack.** No rate limiting, no lockout, no detection. The attacker works entirely on captured ciphertext.
- **Single-packet verification.** One GRP_TXT packet is sufficient. No need to collect multiple messages.
- **No KDF stretching.** Each candidate requires exactly one AES-128 block decryption (16 bytes), not thousands of hash iterations.
- **Global applicability.** No salt means precomputed tables work across all MeshCore deployments using the same passphrase.
- **Side-channel exposure.** Since the PSK IS the key (no KDF), any AES key-schedule side-channel directly reveals the passphrase. PSK reuse across systems (e.g., the same passphrase for MeshCore and WiFi) means compromise of one compromises both.

### 1.5 Severity Assessment

**PSK brute-force is the #1 practical threat to MeshCore channel confidentiality.** Unlike ECB frequency analysis (§5), which requires hundreds of captured messages with repeated content, PSK brute-force requires a single captured packet and succeeds whenever users choose human-memorable passphrases — which is the common case for manually configured channels.

Any channel using a passphrase of 3 or fewer common words, or any alphanumeric string shorter than 12 characters, should be considered **vulnerable to offline brute-force within hours to days** using commodity hardware.

### 1.6 Recommended Mitigations

**Priority 0 (Critical):** Apply a memory-hard KDF (argon2id preferred; scrypt or PBKDF2 with ≥100K iterations as a fallback) to derive the AES key from the passphrase. This transforms each candidate test from ~1 nanosecond to ~100 milliseconds, increasing the attack cost by a factor of ~10^8.

**Priority 0a:** Add a per-channel salt (random bytes stored alongside the channel config) to prevent precomputed/global attacks.

**Priority 0b:** Document that channel PSKs should be random 16-byte keys (e.g., generated with `openssl rand -base64 22`), not human-memorable passphrases. This is a stopgap until KDF support is added.
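A sketch of Priorities 0 and 0a together, written in Go for readability (the argon2id parameters are illustrative and would need tuning for the firmware's actual RAM budget; `golang.org/x/crypto/argon2` provides `IDKey`):

```go
package keys

import (
	"crypto/rand"

	"golang.org/x/crypto/argon2"
)

// newChannelSalt generates the per-channel salt (Priority 0a), stored
// alongside the channel config.
func newChannelSalt() ([]byte, error) {
	salt := make([]byte, 16)
	_, err := rand.Read(salt)
	return salt, err
}

// deriveChannelKey stretches a human passphrase into a 16-byte AES key
// (Priority 0). Each brute-force candidate now costs a full argon2id
// evaluation instead of a single AES block decryption.
func deriveChannelKey(passphrase string, salt []byte) []byte {
	// 3 passes, 64 MiB memory, 1 thread, 16-byte output; illustrative only.
	return argon2.IDKey([]byte(passphrase), salt, 3, 64*1024, 1, 16)
}
```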
## 2. How Encryption Works

### Constants (from `MeshCore.h`)
- `CIPHER_KEY_SIZE = 16` (AES-128)
- `PUB_KEY_SIZE = 32`
- `CIPHER_MAC_SIZE` = HMAC-SHA256 truncated output size

### encrypt() (from `Utils.cpp`)
AES-128-ECB, block by block. No IV, no counter, no chaining:
```cpp
aes.setKey(shared_secret, CIPHER_KEY_SIZE);  // first 16 bytes of shared_secret
while (src_len >= 16) {
  aes.encryptBlock(dp, src);                 // each 16-byte block independently
  dp += 16; src += 16; src_len -= 16;
}
if (src_len > 0) {                           // partial final block
  uint8_t tmp[16];
  memset(tmp, 0, 16);                        // zero-fill
  memcpy(tmp, src, src_len);                 // copy remaining bytes
  aes.encryptBlock(dp, tmp);
}
```

### encryptThenMAC() (from `Utils.cpp`)
```cpp
int enc_len = encrypt(shared_secret, dest + CIPHER_MAC_SIZE, src, src_len);
SHA256 sha;
sha.resetHMAC(shared_secret, PUB_KEY_SIZE);  // HMAC uses full 32 bytes
sha.update(dest + CIPHER_MAC_SIZE, enc_len);
sha.finalizeHMAC(shared_secret, PUB_KEY_SIZE, dest, CIPHER_MAC_SIZE);
```

**Key reuse flaw:** The same `shared_secret` buffer serves both AES and HMAC. AES uses `shared_secret[0..15]` (first 16 bytes). HMAC uses `shared_secret[0..31]` (full 32 bytes). The AES key is a prefix of the HMAC key. See §7 for implications.

### GRP_TXT Plaintext Construction (from `BaseChatMesh::sendGroupMessage()`)

```cpp
memcpy(temp, &timestamp, 4);                     // bytes 0-3: Unix timestamp (seconds)
temp[4] = 0;                                     // byte 4: TXT_TYPE_PLAIN
sprintf((char *)&temp[5], "%s: ", sender_name);  // bytes 5+: "SenderName: "
char *ep = strchr((char *)&temp[5], 0);
int prefix_len = ep - (char *)&temp[5];          // length of "SenderName: "
memcpy(ep, text, text_len);                      // message text (no null terminator)
ep[text_len] = 0;                                // null written AFTER data boundary
// data_len passed to encrypt = 5 + prefix_len + text_len
```

**The null terminator is NOT part of the encrypted data length.** The call to `createGroupDatagram` passes length `5 + prefix_len + text_len`. The null at `ep[text_len]` is written to the buffer but is beyond `data_len`. In the final partial block, `encrypt()` zero-fills with `memset(tmp, 0, 16)` before copying the remaining bytes — so a zero byte appears at the position where the null would be, but this is an artifact of zero-padding, not an explicit null in the plaintext.

On the receiving side, this is confirmed:
```cpp
data[len] = 0;  // need to make a C string again, with null terminator
```
The receiver must re-add the null after decryption.

## 3. Block Layout Analysis

### Notation

Let `N` = length of the sender name. Then:
- `prefix_len` = N + 2 (for the ": " suffix from `sprintf("%s: ", sender_name)`)
- Header overhead = 4 (timestamp) + 1 (type) + prefix_len = N + 7 bytes
- Message text begins at byte offset N + 7

### Block 0

Block 0 = bytes 0–15 of plaintext:
```
[TS₀][TS₁][TS₂][TS₃][0x00][sender_name: ][...message start...]
```

The first 9 − N bytes of message text fit in block 0 (when N < 9). For N ≥ 9, no message text fits in block 0.

### Boundary Condition: Sender Name ≥ 12 Characters

When N ≥ 12, the header overhead (N + 7 ≥ 19) exceeds 16 bytes. The header itself spills into block 1:

**Example: sender name "LongUserName1" (N = 13), message "hi":**
```
Header = 13 + 7 = 20 bytes. Total plaintext = 20 + 2 = 22 bytes.

Block 0 (bytes 0-15):  [TS₀][TS₁][TS₂][TS₃][0x00][L][o][n][g][U][s][e][r][N][a][m]
Block 1 (bytes 16-31): [e][1][:][space][h][i][0x00 ×10]   ← zero-padded partial block
```

Block 1 here contains the tail of the sender name, the ": " separator, message text, AND zero-padding. For sender names of length 12–15, block 1 is a mix of header and message — **it is NOT "pure message text."**

For sender names ≥ 16, block 0 is pure header, and message text doesn't begin until well into block 1 (for N ≥ 25, block 1 is also pure header and the message starts in block 2 or later).

### General Block Content Table

| Sender name length N | Header bytes | Message starts at byte | Block 0 content | Block 1+ content |
|---|---|---|---|---|
| 1–8 | 8–15 | 8–15 | timestamp + header + message start | message text + zero-pad |
| 9–11 | 16–18 | 16–18 | timestamp + header (no message) | header tail + message + zero-pad |
| 12–15 | 19–22 | 19–22 | timestamp + partial header | header tail + message + zero-pad |
| ≥16 | ≥23 | ≥23 | timestamp + partial header | header continuation, then message |

### Typical Case (N = 5, e.g. "Alice")

Header = 12 bytes. Message starts at byte 12. Block 0 holds 4 bytes of message text.

```
Message "hello world" (11 chars). Total plaintext = 12 + 11 = 23 bytes.

Block 0 (bytes 0-15):  [TS₀][TS₁][TS₂][TS₃][0x00][A][l][i][c][e][:][space][h][e][l][l]
Block 1 (bytes 16-22): [o][space][w][o][r][l][d] → padded to: [o][space][w][o][r][l][d][0x00 ×9]
```

Block 1 contains 7 bytes of message text and 9 bytes of zero-padding.
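The same arithmetic, written as a small Go helper (illustrative; it simply encodes the N + 7 header formula and the zero-padding rule from `encrypt()`):

```go
package layout

// GrpTxtLayout describes where a GRP_TXT plaintext falls across 16-byte
// AES-ECB blocks, per the formulas in this section.
type GrpTxtLayout struct {
	HeaderLen     int // 4 (timestamp) + 1 (type) + N + 2 (": ")
	MsgStart      int // byte offset where message text begins
	PlaintextLen  int // 5 + prefix_len + text_len
	CiphertextLen int // padded up to a whole number of blocks
	FinalBlockB   int // content bytes B in the final (zero-padded) block
}

// Layout computes the block layout for a sender name of length
// senderNameLen and a message of length textLen.
func Layout(senderNameLen, textLen int) GrpTxtLayout {
	header := senderNameLen + 7
	ptLen := header + textLen
	blocks := (ptLen + 15) / 16
	b := ptLen % 16
	if b == 0 {
		b = 16 // final block is completely full: no padding bytes
	}
	return GrpTxtLayout{
		HeaderLen:     header,
		MsgStart:      header,
		PlaintextLen:  ptLen,
		CiphertextLen: blocks * 16,
		FinalBlockB:   b,
	}
}
```

For `Layout(5, 11)`, the "Alice"/"hello world" case above, this returns a 12-byte header, 23-byte plaintext, 32-byte ciphertext, and B = 7 content bytes in the final block, matching the diagram.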
## 4. Attack Surface by Block Position

### Block 0: Accidental Nonce from Timestamp

The 4-byte Unix timestamp in bytes 0–3 acts as an **accidental nonce** — it was included "mostly as an extra blob to help make packet_hash unique" (per a firmware comment), not as a cryptographic countermeasure against ECB determinism. Nevertheless, it has the effect of making block 0's plaintext vary per message.

**Precision on uniqueness:** Block 0 is unique per (sender, timestamp-second) pair, not per message. Two messages from the same sender within the same second, on the same channel, with the same type byte, produce identical block 0 plaintext and therefore identical block 0 ciphertext. At typical mesh chat rates, same-second collisions are rare but not impossible for automated/scripted senders.

**Known-plaintext observation:** Bytes 4–15 of block 0 are largely predictable per sender (the type byte is always 0x00 for plain text; the sender name and ": " are static). The timestamp is predictable within a window (Unix seconds). An attacker who knows the sender name and approximate time can compute all 16 plaintext bytes of block 0. However, **AES-128 is resistant to known-plaintext attacks** — knowing plaintext–ciphertext pairs for block 0 does not help recover the key or decrypt other blocks.

### Blocks 1+: Deterministic ECB (for short sender names)

When the sender name is short enough that the header fits in block 0 (N ≤ 8), blocks 1+ contain **only message text and zero-padding.** No timestamp, no nonce, no per-message varying data. Identical message text at the same block offset produces identical ciphertext, always.

When N ≥ 9, block 1 contains header spillover, which includes static sender-name bytes — these vary per sender but not per message, so block 1 is still deterministic for a given sender once the header portion is fixed.

**The fundamental ECB property:** For any block beyond the timestamp's reach, `E_K(P) = E_K(P)`. Same plaintext block → same ciphertext block, regardless of when or how many times it's sent.

### Partial Final Block: Strongest Attack Target

The final block of every message is zero-padded by `encrypt()` to 16 bytes. The padding bytes are deterministic and known (always 0x00). For a message whose final block contains `B` bytes of actual content:

- `B` bytes are unknown message text
- `16 − B` bytes are known zeros

When B is small (a short final fragment), most of the block is known plaintext. For B = 1, the attacker knows 15 of 16 bytes — only 256 possible plaintext blocks exist. This means:

- **The final block has at most 2^(8B) possible plaintexts** (versus 2^128 for a full unknown block)
- For B ≤ 4, there are ≤ 2^32 possibilities — a small enough space for dictionary attacks given enough ciphertext samples
- The attacker can precompute all possible final-block plaintexts for small B values and match against observed ciphertext blocks

This makes the partial final block a **stronger frequency analysis target** than interior blocks, where all 16 bytes may be unknown text.

## 5. Feasible Attack Scenarios

### 5.1 Block Frequency Analysis on Blocks 1+

**Preconditions (all must hold):**
1. Attacker can observe encrypted GRP_TXT packets (passive radio capture)
2. Messages from the same sender (or senders with identical name lengths — same block alignment)
3. Messages long enough to produce blocks beyond block 0 (text > 9 − N chars)
4. Sufficient message volume with repeated content at the same block positions

**Method:**
1. Collect GRP_TXT packets, group by sender hash
2. Decompose encrypted payloads into 16-byte blocks (after stripping the HMAC prefix)
3. Discard block 0 (timestamp-varying)
4. Build frequency tables for blocks 1, 2, 3, etc., per sender (see the sketch after this section)
5. Match high-frequency ciphertext blocks against expected plaintext distributions

**Practical constraints limiting this attack:**
- LoRa bandwidth severely limits message length. Most mesh chat messages are short — many fit entirely within block 0 (≤ 9 − N chars of text), yielding zero analyzable blocks.
- Messages that spill into block 1+ tend to be longer and more varied — fewer repeated patterns.
- The attack requires repeated identical 16-byte-aligned text fragments from the same sender over time.

**Conditions under which this attack succeeds:** Automated or scripted senders transmitting repetitive messages longer than block 0 capacity, on a channel with a static PSK, over an extended collection period. For human-typed conversational messages of typical length and variety, the number of repeated block 1+ patterns is likely too low for meaningful frequency analysis. (This is an empirical claim that depends on actual traffic patterns — no formal bound is established here.)
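A minimal sketch of steps 2–4 of the method, in Go (assumptions: the HMAC prefix has already been stripped, and `payloads` holds one sender's encrypted GRP_TXT bodies):

```go
package freq

import "encoding/hex"

// BlockCounts tallies repeated 16-byte ciphertext blocks at each block
// position, skipping block 0 (timestamp-varying). A count > 1 at any
// position marks a repeated plaintext block: the ECB determinism signal.
func BlockCounts(payloads [][]byte) map[int]map[string]int {
	counts := make(map[int]map[string]int)
	for _, ct := range payloads {
		for pos := 1; (pos+1)*16 <= len(ct); pos++ { // start at 1: skip block 0
			key := hex.EncodeToString(ct[pos*16 : (pos+1)*16])
			if counts[pos] == nil {
				counts[pos] = make(map[string]int)
			}
			counts[pos][key]++
		}
	}
	return counts
}
```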
### 5.2 Partial Final Block Dictionary Attack

**Preconditions:**
1. Attacker knows (or can estimate) the message length modulo 16
2. The final block has few content bytes (B ≤ 4)

**Method:** Enumerate all 2^(8B) candidate plaintexts for the final block. Since AES-ECB is deterministic with a fixed key, the attacker can build a lookup table: if they ever observe a ciphertext block matching one of the candidates in a known-plaintext scenario (e.g., from a leaked or guessed message), they can identify which final-block value corresponds to which ciphertext.

**Limitation:** Without the key, the attacker cannot compute E_K(candidate) directly. The attack requires collecting enough ciphertext final blocks to perform frequency analysis within the reduced plaintext space. With only 256 possibilities (B = 1), convergence is fast given sufficient samples.

### 5.3 Cross-Sender Correlation

Senders with identical name lengths produce identical block alignments. Messages from "Alice" (N=5) and "Bobby" (N=5) place message text at the same byte offsets. If both send the same message, their blocks 1+ are identical ciphertext — **but only if they share the same channel PSK** (same AES key). On the same channel, this enables cross-sender frequency analysis within same-name-length groups.

### 5.4 Message Length Leakage

Ciphertext length = ⌈(5 + prefix_len + text_len) / 16⌉ × 16 bytes. This reveals the message text length within a 16-byte window (not 15, because the block count is the observable quantity). Not ECB-specific — any block cipher without constant-length padding leaks this.

### 5.5 Replay Attacks

`encryptThenMAC()` authenticates the ciphertext, but if the mesh doesn't track previously seen packet MACs, captured packets can be replayed. The embedded timestamp may be checked for staleness — this requires firmware verification beyond the scope of this analysis.

### 5.6 No Forward Secrecy

Channel PSKs are static and shared among all participants. ECDH shared secrets for direct messages are also static (no ephemeral key exchange). Compromise of any key decrypts all past and future traffic encrypted under that key.

## 6. What Known-Plaintext Does NOT Achieve

AES-128 is designed to resist known-plaintext attacks. An attacker who knows the full plaintext and ciphertext of block 0 (or any block) **cannot**:
- Recover the AES key
- Decrypt other blocks encrypted under the same key
- Derive any information about other plaintexts from their ciphertexts

The ECB weakness is **determinism** (identical plaintext → identical ciphertext), not key recovery. The attacks in §5 exploit pattern matching and frequency analysis, not cryptanalysis of AES itself.

## 7. HMAC Key Reuse: Cryptographic Design Flaw

From `encryptThenMAC()`:
- AES key: `shared_secret[0..15]` (CIPHER_KEY_SIZE = 16)
- HMAC key: `shared_secret[0..31]` (PUB_KEY_SIZE = 32)

The AES key is the first half of the HMAC key. Both are derived from the same `shared_secret` — for channels, this is the PSK; for direct messages, the ECDH shared secret.

**Why this matters:**
1. **Violated key-separation principle.** Standard practice dictates that encryption and authentication keys be independent. Using overlapping portions of the same secret means a weakness in one mechanism could leak information relevant to the other.
2. **The HMAC key reveals the AES key.** If an attacker recovers the 32-byte HMAC key (e.g., through a side-channel attack on the HMAC computation), they automatically obtain the 16-byte AES key as a prefix.
3. **No key derivation function.** The shared_secret is used directly — no HKDF or similar KDF is applied to derive independent subkeys. This is a departure from cryptographic best practice (cf. RFC 5869).

**Practical impact:** In the current threat model (passive radio capture of LoRa packets), this is unlikely to be directly exploitable — HMAC-SHA256 does not leak its key through normal operation. However, it represents a structural weakness that compounds with any future vulnerability in either the AES or HMAC implementation.

## 8. TXT_MSG (Direct Message) Block Layout

Direct messages use a different plaintext structure (from `BaseChatMesh::composeMsgPacket()`):

```cpp
memcpy(temp, &timestamp, 4);           // bytes 0-3: timestamp
temp[4] = (attempt & 3);               // byte 4: attempt counter (0-3)
memcpy(&temp[5], text, text_len + 1);  // bytes 5+: message text
// data_len = 5 + text_len (null terminator copied but not counted in length)
```

**Block layout for TXT_MSG:**
```
Block 0: [TS₀][TS₁][TS₂][TS₃][attempt][text bytes 0-10]
Block 1: [text bytes 11-26]   (if the message is long enough)
```

Key differences from GRP_TXT:
- **No sender name in the plaintext** — the sender is identified by the source hash in the unencrypted packet header, not in the encrypted payload.
- **The header is exactly 5 bytes** (4 timestamp + 1 attempt), always. No variable-length field.
- **11 bytes of message text fit in block 0** (vs. 9 − N for GRP_TXT).
- **Encrypted with a per-pair ECDH shared secret**, not a group PSK. Each sender–recipient pair has a unique key.

**ECB implications for TXT_MSG:**
- Block 0 is still protected by the timestamp accidental nonce.
- Blocks 1+ are deterministic, same as GRP_TXT — identical message text at the same offset produces identical ciphertext.
- However, frequency analysis is harder: each sender–recipient pair uses a different key, so the attacker can only correlate messages within a single pair. The message volume for any given pair is typically much lower than for a group channel.
- The fixed 5-byte header means block alignment is consistent across ALL direct messages (unlike GRP_TXT, where alignment varies by sender name length). An attacker who compromises one ECDH key can build block frequency tables, but only for that specific pair.

## 9. Mitigations

### Priority 1: Switch to AES-128-CTR

Replace ECB with CTR mode. Use the existing 4-byte timestamp + a 4-byte per-message counter as the 8-byte nonce (padded to 16 bytes for the CTR block). Each byte of plaintext gets XORed with a unique keystream byte — this eliminates all block-level determinism.

**Wire format change:** None if the nonce is derived from header fields already present. If an explicit counter is added, 4 bytes of overhead per message.
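A minimal Go sketch of the proposed mode; the nonce layout (4-byte timestamp, 4-byte counter, 8 zero bytes) is this document's suggestion, not an existing wire format, and the byte order is an arbitrary choice here:

```go
package ctrmode

import (
	"crypto/aes"
	"crypto/cipher"
	"encoding/binary"
)

// encryptCTR encrypts msg with AES-128-CTR. Reusing a (timestamp, counter)
// pair under the same key would reuse keystream, so counter must be unique
// per message within each timestamp second.
func encryptCTR(key []byte, timestamp, counter uint32, msg []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // 16-byte key -> AES-128
	if err != nil {
		return nil, err
	}
	var iv [16]byte // 4B timestamp + 4B counter + 8 zero bytes
	binary.LittleEndian.PutUint32(iv[0:4], timestamp)
	binary.LittleEndian.PutUint32(iv[4:8], counter)

	out := make([]byte, len(msg))
	cipher.NewCTR(block, iv[:]).XORKeyStream(out, msg)
	return out, nil // no block padding needed: CTR is a stream mode
}
```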
### Priority 2: Derive Independent Subkeys

Apply HKDF (or at minimum, two distinct SHA-256 hashes) to the shared_secret to produce independent AES and HMAC keys. This is a minimal code change:
```
aes_key  = SHA256(shared_secret || "encrypt")[0..15]
hmac_key = SHA256(shared_secret || "authenticate")
```
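The same split written out in Go (a sketch of the minimal two-hash variant above; HKDF per RFC 5869 remains the more rigorous choice):

```go
package subkeys

import "crypto/sha256"

// deriveSubkeys produces independent AES and HMAC keys from a single
// shared secret, so the AES key is no longer a prefix of the HMAC key.
func deriveSubkeys(sharedSecret []byte) (aesKey, hmacKey []byte) {
	e := sha256.New()
	e.Write(sharedSecret)
	e.Write([]byte("encrypt"))
	aesKey = e.Sum(nil)[:16] // AES-128 key

	a := sha256.New()
	a.Write(sharedSecret)
	a.Write([]byte("authenticate"))
	hmacKey = a.Sum(nil) // full 32-byte HMAC key
	return aesKey, hmacKey
}
```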
### Priority 3: Constant-Length Padding

Pad all messages to a fixed block count (e.g., 4 blocks = 64 bytes) to eliminate length leakage. Expensive on LoRa — should be configurable per channel as a security-vs-bandwidth tradeoff.

### Priority 4: Replay Protection

Track seen packet HMACs within a time window. Reject messages with timestamps older than N minutes.

### Priority 5: Channel Key Rotation

Manual or automated periodic rotation of channel PSKs. Even monthly rotation limits the exposure window.

### Priority 6: Forward Secrecy

Ephemeral ECDH for direct messages. A significant protocol change, but it prevents retroactive decryption on key compromise.

## 10. Speculative: LLM-Assisted Analysis

> **This section is speculation, not formal analysis.** The claims below are plausible but unvalidated. They do not affect the formal findings in §1–9.

An LLM could reduce the sample size needed for block frequency analysis:

1. **Context-aware candidate generation:** Given a sender's known patterns (the sender name is recoverable from block 0's predictable prefix), an LLM could generate likely message continuations and predict which plaintext blocks to look for in the frequency tables.
2. **Conversational inference:** Timestamps + sender IDs + partially decoded messages could let an LLM reconstruct probable conversation flow, narrowing the search space for unknown blocks.
3. **Community-specific vocabulary:** Training on public mesh chat logs could yield common phrases and greeting patterns, further reducing the candidate plaintext space.

This does not change the fundamental requirement (blocks 1+ must repeat, or the final block must fall within a small enough space for dictionary matching). It potentially reduces the number of captured messages needed for convergence, but no quantitative bound is established.

## 11. Conclusion

MeshCore's encryption has four vulnerabilities, ranked by practical exploitability:

### Vulnerability #1: PSK Brute-Force (Critical)

**No KDF + known-plaintext oracle = offline key recovery from a single packet.** Any channel using a human-memorable passphrase of ≤3 common words or ≤11 alphanumeric characters is recoverable in minutes to hours on commodity GPU hardware. This is the highest-priority threat because it requires minimal attacker capability (one captured packet), succeeds against the most common deployment pattern (human-chosen passphrases), and completely compromises channel confidentiality. See §1.

### Vulnerability #2: ECB Determinism (Medium)

**Blocks beyond the timestamp's reach are deterministic.** Identical plaintext at the same block offset always produces identical ciphertext. For GRP_TXT messages longer than ~9 − N characters (where N is the sender name length), this enables frequency analysis on blocks 1+. The partial final block, with its known zero-padding, is the strongest individual target. Exploitation requires hundreds of captured messages with repeated content — a higher bar than PSK brute-force. See §4–§5.

### Vulnerability #3: Key Material Reuse (Medium)

**AES and HMAC share the same key material** without a key derivation function. The AES key is a prefix of the HMAC key. This violates key separation and creates a structural dependency between the encryption and authentication mechanisms. See §7.

### Vulnerability #4: No Forward Secrecy (Low–Medium)

**No forward secrecy, no key rotation, no replay protection.** These are independent of the above but compound the risk: a single key compromise (whether via brute-force or other means) exposes all past and future traffic encrypted under that key. See §9.

**Summary of recommended mitigations (in priority order):**
1. **(Critical)** Apply a memory-hard KDF (argon2id) to channel PSKs — §1.6
2. **(Critical)** Add a per-channel salt — §1.6
3. **(High)** Switch from AES-128-ECB to AES-128-CTR — §9
4. **(High)** Derive independent AES and HMAC subkeys via HKDF — §9
5. **(Medium)** Constant-length padding, replay protection, key rotation — §9
6. **(Low)** Forward secrecy via ephemeral ECDH — §9

The timestamp in block 0 was not designed as a nonce and should not be relied upon as one.
@@ -0,0 +1,132 @@

# Proposal: Terminal/TUI Interface for CoreScope

**Status:** Approved for MVP
**Issue:** TBD

## Problem

CoreScope's web UI requires a browser. Operators managing remote mesh deployments often work over SSH — headless servers, Raspberry Pis, field laptops with spotty connectivity. They need to check mesh health, view packet flow, and diagnose issues without opening a browser.

## Vision

A terminal-based user interface (TUI) that connects to a CoreScope instance's API and renders key views directly in the terminal. Think `htop` for mesh networks.

---

## Expert Review

### Carmack (Performance / Data Flow)

- **bubbletea is fine for this.** The TUI is a thin API consumer — it's not processing 7.3M observations locally. The server does the heavy lifting; the TUI just renders summary data from `/api/observers/metrics/summary` (dozens of rows, not millions). No performance concern here.
- **WebSocket in a TUI — one gotcha: reconnection.** SSH sessions drop, networks flake. The TUI MUST auto-reconnect with exponential backoff. Don't let a dropped WS kill the whole UI — show a "reconnecting..." status and keep the last-known state visible.
- **Memory footprint:** Should be trivial. The TUI holds at most a few hundred packets in a ring buffer for the live feed plus summary stats. Target <20MB RSS. bubbletea itself is lightweight. The danger is unbounded packet accumulation — use a fixed-size ring buffer (e.g., last 1000 packets) for the live feed, not an ever-growing slice.
- **Batch WS messages.** Don't re-render on every single packet. Coalesce WS messages and re-render at most 10fps (every 100ms). Terminal rendering is slow — flooding it with updates causes flicker and CPU burn. A sketch of the ring buffer and coalescing loop follows.
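
A minimal Go sketch of both ideas — the fixed-size ring and the 100ms coalescing loop. `Packet` is a stand-in type; the real WS message shape may differ:

```go
// Hedged sketch of the live-feed buffer: a fixed-size ring plus a 100ms
// render tick, so rendering never outpaces the terminal.
package feed

import "time"

type Packet struct{ Line string }

type Ring struct {
	buf  []Packet
	next int
	full bool
}

func NewRing(n int) *Ring { return &Ring{buf: make([]Packet, n)} }

// Push overwrites the oldest entry once the ring is full.
func (r *Ring) Push(p Packet) {
	r.buf[r.next] = p
	r.next = (r.next + 1) % len(r.buf)
	if r.next == 0 {
		r.full = true
	}
}

// Len reports how many packets the ring currently holds.
func (r *Ring) Len() int {
	if r.full {
		return len(r.buf)
	}
	return r.next
}

// Coalesce drains packets into the ring and triggers render at most
// every 100ms, regardless of how fast the WebSocket delivers.
func Coalesce(in <-chan Packet, render func(*Ring)) {
	r := NewRing(1000)
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()
	dirty := false
	for {
		select {
		case p, ok := <-in:
			if !ok {
				return
			}
			r.Push(p)
			dirty = true
		case <-tick.C:
			if dirty {
				render(r)
				dirty = false
			}
		}
	}
}
```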

### Torvalds (Simplicity / Scope)

- **The scope is too big for an MVP.** Node detail view, sparklines, SSH server mode, multi-instance, export — delete all of that from M1. You need TWO views to prove this works: fleet dashboard table and live packet feed. That's it.
- **bubbletea vs tview:** bubbletea. Not because the Elm architecture is "clean" — because it's what the Go community actually uses now, the examples are good, and lipgloss makes table rendering trivial. Don't overthink this.
- **Over-engineering risk is HIGH.** The proposal describes 4 views, stretch features, and SSH server mode before a single line of code exists. Build the two-view demo. Ship it. Then decide what's next based on whether anyone actually uses it.
- **Same repo, `cmd/tui/`.** Don't create a separate repo for what's going to be 500 lines of Go initially. It shares the same API types. Keep it together.
- **Kill the "Open Questions" section.** Answer them: Target user = anyone with SSH access. M1 = dashboard + live feed. Same repo. Name = `corescope-tui`. Done. Stop discussing, start building.

### Doshi (Strategy / Prioritization)

- **This is an N (Neutral) feature, not an L.** It doesn't change CoreScope's trajectory — the web UI already works. But it's a solid N: it unlocks a real use case (SSH-only operators) and proves CoreScope's API is a proper platform, not just a web app backend.
- **The MVP that proves the concept:** Can an operator SSH into a Pi, run `corescope-tui --url http://analyzer:3000`, and immediately see fleet health + live packets? If yes, the concept is proven. Everything else (node detail, sparklines, alerting) is M2+.
- **Defer list:** Node detail view, RF sparklines, SSH server mode, multi-instance, export, mouse support, true-color fallback, alerting. ALL of these are M2 or later.
- **Pre-mortem — why would this fail?**
  1. Nobody uses it because the web UI is good enough (likely for most users — that's fine, this is for the SSH-only niche)
  2. The API doesn't return what the TUI needs in the right shape (validate this FIRST — curl the endpoints before writing any TUI code)
  3. Scope creep kills the demo — someone adds "just one more view" and it's never done
- **Opportunity cost:** Low. This is a day of work for the MVP. The API already exists. The risk is spending a week on polish nobody asked for.

---

## MVP Definition (Demo Target)

**Goal:** A working two-view TUI that connects to any CoreScope instance and displays real-time mesh data in a terminal. Buildable in one focused session.

### View 1: Fleet Dashboard (default)

```
┌─ CoreScope TUI ──────────────────────────────────────────┐
│ Connected: analyzer.00id.net | Observers: 35 | ● Live    │
├──────────────────────────────────────────────────────────┤
│ Observer         │ Nodes │ Pkts/hr │ NF   │ Status       │
│ GY889 Repeater   │ 142   │ 312     │ -112 │ ● active     │
│ C0ffee SF        │ 89    │ 201     │ -108 │ ● active     │
│ ELC-ONNIE-RPT-1  │ 67    │ 156     │ -95  │ ▲ warning    │
│ Bar Repeater     │ 12    │ 3       │ -76  │ ▼ stale      │
└──────────────────────────────────────────────────────────┘
Tab: [Dashboard] [Live Feed]   q: quit   ?: help
```

- **Data source:** `GET /api/observers/metrics/summary`
- **Refresh:** Poll every 5s (simple, no WS needed for this view)
- **Sort:** By observer name initially. Stretch: column sort with arrow keys.

### View 2: Live Packet Feed

```
┌─ Live Feed ──────────────────────────────────────────────┐
│ 14:32:01 ADVERT   GY889 Repeater      → 3 hops   -112dB  │
│ 14:32:02 GRP_TXT  #test "hello world" → 5 hops   -98dB   │
│ 14:32:03 TXT_MSG  [encrypted]         → 2 hops   -105dB  │
│ 14:32:04 CHAN     #sf "anyone on?"    → 8 hops   -91dB   │
│ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░   │
└──────────────────────────────────────────────────────────┘
Tab: [Dashboard] [Live Feed]   p: pause   q: quit
```

- **Data source:** WebSocket (`/ws`)
- **Buffer:** Ring buffer, last 500 packets max
- **Render:** Coalesce updates, re-render at most 10fps
- **Reconnect:** Auto-reconnect with exponential backoff (1s, 2s, 4s, max 30s) — see the sketch below
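
A hedged sketch of the backoff loop; `connect` is pluggable so the sketch stays library-agnostic (the real client would dial `/ws` and pump packets until the connection drops):

```go
// Hedged sketch: reconnect with exponential backoff (1s, 2s, 4s ... 30s).
package feed

import "time"

// RunWithReconnect calls connect, which blocks for the lifetime of one
// WebSocket session, and re-dials after every drop.
func RunWithReconnect(connect func() error) {
	backoff := time.Second
	for {
		start := time.Now()
		_ = connect() // returning (with or without error) means the WS dropped
		if time.Since(start) > time.Minute {
			backoff = time.Second // a long healthy session resets the backoff
		}
		// Show "reconnecting..." here; keep last-known state on screen.
		time.Sleep(backoff)
		if backoff *= 2; backoff > 30*time.Second {
			backoff = 30 * time.Second
		}
	}
}
```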

### What's NOT in MVP
- Node detail view
- RF sparklines
- SSH server mode (`--serve-ssh`)
- Multi-instance support
- Export to CSV/JSON
- Mouse support
- Alerting / terminal bell
- Color theme configuration
- Custom filters (`/` to filter)

### Technical Decisions (Resolved)

| Question | Answer |
|---|---|
| Target user | SSH operators, power users, field techs |
| Library | bubbletea + lipgloss |
| Location | `cmd/tui/` in same repo |
| Binary name | `corescope-tui` |
| Min terminal | 256-color, 80x24 |
| State | Stateless — pure API consumer, no local DB |

### Implementation Plan
1. Scaffold `cmd/tui/main.go` — flag parsing (`--url`), bubbletea app init (skeleton sketched below)
2. Fleet dashboard model — fetch `/api/observers/metrics/summary`, render table
3. Live feed model — WebSocket connect, ring buffer, packet rendering
4. Tab switching between views
5. Status bar (connection state, help hints)
6. Test against `https://analyzer.00id.net`
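
A hedged skeleton for step 1, assuming bubbletea's standard `Model` interface (`Init`/`Update`/`View`); the view bodies are placeholders for steps 2–3:

```go
// Hedged skeleton for cmd/tui/main.go. Endpoint paths come from the
// spec; everything else here is illustrative scaffolding.
package main

import (
	"flag"
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea"
)

type model struct {
	url string
	tab int // 0 = dashboard, 1 = live feed
}

func (m model) Init() tea.Cmd { return nil } // later: kick off summary fetch

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if key, ok := msg.(tea.KeyMsg); ok {
		switch key.String() {
		case "q", "ctrl+c":
			return m, tea.Quit
		case "tab":
			m.tab = (m.tab + 1) % 2
		}
	}
	return m, nil
}

func (m model) View() string {
	if m.tab == 0 {
		return "Dashboard (summary table goes here)\n"
	}
	return "Live Feed (ring buffer goes here)\n"
}

func main() {
	url := flag.String("url", "http://localhost:3000", "CoreScope base URL")
	flag.Parse()
	if _, err := tea.NewProgram(model{url: *url}).Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```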

---

## Future Milestones (post-MVP, not scheduled)

### M2: Navigation & Detail
- Node detail view (select observer → see its packets/neighbors)
- Keyboard navigation (j/k, Enter, Esc)
- `/` to filter packets

### M3: Visualization
- RF noise floor sparklines (`▁▂▃▅▇█`)
- Health history over time
- Color theme support

### M4: Advanced
- SSH server mode (`--serve-ssh :2222`)
- Multi-instance tabs
- Export current view to stdout (CSV/JSON)
- Desktop notifications on anomalies
@@ -0,0 +1,148 @@

# Channel Color Highlighting Spec

**Status:** Proposed
**Issue:** [#271](https://github.com/Kpa-clawbot/CoreScope/issues/271)
**Author:** Stinkmeaner (AI)
**Date:** 2026-04-05

## Problem

When monitoring multiple active hash channels simultaneously on the Live tab, all `GRP_TXT` traffic renders identically — same color, same styling. Users tracking specific channels (e.g. `#wardriving`) cannot visually distinguish their traffic from other channel activity without reading each row's channel field.

## Solution

Allow users to assign custom highlight colors to specific hash channels. Colors propagate across the Live feed, map animations, and timeline. Unassigned channels retain the default `GRP_TXT` styling.

### Data Model

**Storage:** Single `localStorage` key `live-channel-colors`

```json
{
  "#wardriving": "#ef4444",
  "#meshnet": "#3b82f6"
}
```

- Keyed by resolved channel name (e.g. `#wardriving`) or raw hash prefix if unresolved
- Included in customizer theme export/import for portability
- Maximum ~16 assignments (no hard limit, but the UI should discourage excess — see Edge Cases)

### Channel Matching

- Match on the packet's `channel` or `group` field
- Handle both resolved channel names and raw hash prefixes
- Only applies to `GRP_TXT` packet types — other types retain their existing `TYPE_COLORS` styling

### Visual Treatment

**Feed rows (primary):**
- 4px colored left border
- Subtle background tint: channel color at 8–10% opacity
- Text color unchanged — contrast must remain accessible

**Map animations:**
- Packet arcs use the assigned channel color instead of the default `TYPE_COLORS.GRP_TXT`
- Node markers retain role-based coloring (channel color does NOT override node markers)

**Timeline sparkline:**
- Dots/bars colored per channel assignment
- Unassigned channels use the default color

**Auto-legend:**
- Generated from active assignments
- Displayed near the feed header
- Color swatch + channel name, compact horizontal layout

### Configuration UI

**Quick assign (primary workflow):**
- Right-click (long-press on mobile) a channel name in the Live feed
- Color picker popover with ~12 preset swatches + custom hex input
- "Clear" button to remove the assignment

**Customizer panel (management):**
- New "Channel Colors" section under the existing "Packet Type Colors"
- Lists all assigned channels with color swatches
- Add/edit/remove individual assignments
- "Clear All" button
- Synced with theme export/import

### Priority Rules

| Context | Color source |
|---------|-------------|
| Feed row background/border | Channel color (if assigned), else default |
| Feed row text | Always default (no override) |
| Map packet arcs | Channel color (if assigned), else `TYPE_COLORS.GRP_TXT` |
| Map node markers | Always role color (no override) |
| Timeline dots | Channel color (if assigned), else default |

## Edge Cases

- **10+ colors:** At ~10 simultaneous assignments, colors become hard to distinguish. The UI should show a soft warning ("Many colors assigned — consider clearing unused ones") but not block the user.
- **Color conflicts with role/type colors:** Channel color takes priority for feed row highlighting only. Role colors remain authoritative for node markers.
- **Removal:** Clearing a channel color reverts to default styling immediately — no page refresh needed.
- **Non-GRP_TXT packets:** Channel color is never applied. These packets have no channel association.
- **Customizer rework (#288):** If the customizer rework lands first, the Channel Colors section should follow the new single-delta-object pattern (`cs-theme-overrides`). If it hasn't landed, use the standalone `live-channel-colors` key and migrate later.
- **Dark/light mode:** Channel colors are mode-independent (same color in both modes). The 8–10% opacity tint ensures readability in both themes.

## Milestones

### M1: Core model + feed row highlighting
- `localStorage` read/write for `live-channel-colors`
- Feed row rendering: left border + background tint
- Unit tests for storage CRUD and color application logic

### M2: Quick-assign UI
- Right-click / long-press context menu on channel names
- Color picker popover with presets + custom hex
- Clear button
- Playwright E2E test for the assign/clear workflow

### M3: Map animation integration
- Packet arc color lookup from channel assignments
- Falls back to `TYPE_COLORS.GRP_TXT` when unassigned
- Visual verification via browser screenshot

### M4: Customizer section + export/import
- "Channel Colors" management panel in the customizer
- Include channel colors in the theme export JSON
- Import restores channel colors
- Unit tests for the export/import round-trip

### M5: Timeline coloring + auto-legend
- Timeline sparkline uses channel colors
- Auto-legend renders near the feed header
- Playwright E2E for legend visibility

## Testing

| Level | What | How |
|-------|------|-----|
| Unit | Storage CRUD, color lookup, merge with defaults | `test-frontend-helpers.js` via `vm.createContext` |
| Unit | Export/import round-trip with channel colors | Same |
| E2E | Quick-assign popover, color applied to feed rows | Playwright against localhost |
| E2E | Customizer channel colors section | Playwright |
| E2E | Legend appears when ≥1 channel colored | Playwright |
| Visual | Map arcs colored, dark/light mode readability | Browser screenshot |

## Expert Review Notes

### Tufte (Visualization)
- **Left border + tint is sound.** The 4px border is data-ink (it encodes channel identity). The tint at 8–10% opacity provides grouping without overwhelming the data. This is information encoding, not decoration.
- **Risk at scale:** Beyond ~8 colors, perceptual distinguishability drops sharply. The spec correctly warns but doesn't enforce. Consider using a curated palette of maximally distinct colors (like ColorBrewer qualitative sets) as the preset swatches rather than a free-form picker.
- **Auto-legend is correct:** Direct labeling on every row would be redundant (the channel name is already in the row). A compact legend near the feed is the right balance — it teaches the encoding once.
- **No chartjunk introduced.** The visual treatment adds information (channel identity) without decorative excess.

### Torvalds (Code Quality)
- **localStorage is fine** for user preferences with <1KB payloads. No need for IndexedDB or server-side storage.
- **5 milestones is appropriate.** Each is independently shippable and testable. No milestone depends on speculation about future milestones.
- **Watch the customizer coupling.** If #288 lands, the `live-channel-colors` key should merge into `cs-theme-overrides`. Design the read/write functions to abstract the storage key so migration is a one-line change, not a rewrite.
- **Keep the color picker simple.** Don't build a custom color picker — use `<input type="color">` with preset swatch buttons. The browser's native picker is fine.

### Doshi (Product Strategy)
- **This is N (Neutral).** It's a genuine usability improvement for multi-channel monitoring, but it doesn't change CoreScope's trajectory. It won't attract new users or unlock new use cases — it makes existing power users slightly more efficient.
- **Opportunity cost is low.** Each milestone is small (~1–2 hours of work). The total investment is modest.
- **5 milestones is fine** given each is small. Shipping M1+M2 alone delivers 80% of the value. M3–M5 are polish. Treat M1+M2 as the MVP gate — if nobody uses channel colors after M2, stop there.
- **Pre-mortem:** This fails if users rarely monitor 2+ channels simultaneously, making the problem theoretical. Validate that multi-channel monitoring is a real workflow before M3.
@@ -0,0 +1,311 @@

# Deployment Simplification Spec

**Status:** Draft
**Author:** Kpa-clawbot
**Date:** 2026-04-05

## Current State

CoreScope deployment today requires:

1. **Clone the repo** and build from source (`docker compose build`)
2. **Create a config.json** — the example is 100+ lines with MQTT credentials, channel keys, theme colors, regions, cache TTLs, health thresholds, branding, and more. An operator must understand all of this before seeing a single packet.
3. **Set up a Caddyfile** for TLS (separate `caddy-config/` directory, bind-mounted)
4. **Understand the supervisord architecture** — the container runs 4 processes (mosquitto, ingestor, server, caddy) via supervisord. This is opaque to operators.
5. **No pre-built images** — there's no image on Docker Hub or GHCR. Every operator must `git clone` + `docker compose build`.
6. **Updates require rebuilding** — `git pull && docker compose build && docker compose up -d`. No `docker compose pull`.
7. **manage.sh is 100+ lines** of bash wrapping `docker compose` with state files, confirmations, and color output. It's helpful for the maintainer but intimidating for new operators.

### What works well

- **Dockerfile is solid** — multi-stage Go build, Alpine runtime, small image
- **Health checks exist** — `wget -qO- http://localhost:3000/api/stats`
- **Environment variable overrides** — ports and data dirs are configurable via `.env`
- **Data persistence** — bind mounts for the DB (`~/meshcore-data`), named volume for Caddy certs
- **DISABLE_MOSQUITTO flag** — can use an external MQTT broker
- **Graceful shutdown** — `stop_grace_period: 30s`, SIGTERM handling

### What's painful

| Pain Point | Impact |
|---|---|
| Must build from source | Blocks anyone without Go/Docker buildx knowledge |
| 100-line config.json required | Operator doesn't know what's optional vs required |
| No sensible defaults for MQTT | Can't connect to the public mesh without credentials |
| No pre-built multi-arch images | ARM users (Raspberry Pi) must cross-compile |
| No one-line deploy | Minimum 4 steps: clone, configure, build, start |
| Updates = rebuild | Slow, error-prone, requires git |

## Goal

An operator who has never seen the codebase should be able to run CoreScope with:

```bash
docker run -d -p 80:80 -v corescope-data:/app/data ghcr.io/kpa-clawbot/corescope:v3.4.1
```

And see live MeshCore packets from the public mesh within 60 seconds.

## Pre-built Images

Publish to **GHCR** (`ghcr.io/kpa-clawbot/corescope`) on every release tag.

- **Tags:**
  - `vX.Y.Z` (e.g., `v3.4.1`) — specific release, pinned, recommended for production
  - `vX.Y` (e.g., `v3.4`) — latest patch in a minor series, auto-updates patches only
  - `vX` (e.g., `v3`) — latest minor+patch in a major series
  - `latest` — latest release tag (NOT latest commit). Only moves on tagged releases, never on random master commits. Still, production deployments should pin to `vX.Y.Z`
  - `edge` — built from master on every push. Unstable, for testing only. Clearly labeled as such
- **Architectures:** `linux/amd64`, `linux/arm64` (Raspberry Pi 4/5)
- **Build trigger:** GitHub Actions on `v*` tag push
- **CI workflow:** New job `publish` after the existing `deploy`, uses `docker/build-push-action` with QEMU for multi-arch

```yaml
# .github/workflows/publish.yml (simplified)
on:
  push:
    tags: ['v*']
jobs:
  publish:
    runs-on: ubuntu-latest
    permissions:
      packages: write
    steps:
      - uses: actions/checkout@v5
      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: docker/build-push-action@v6
        with:
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ghcr.io/kpa-clawbot/corescope:latest
            ghcr.io/kpa-clawbot/corescope:${{ github.ref_name }}
          build-args: |
            APP_VERSION=${{ github.ref_name }}
            GIT_COMMIT=${{ github.sha }}
            BUILD_TIME=${{ github.event.head_commit.timestamp }}
```

## Configuration

### Hierarchy (highest priority wins)

1. **Environment variables** — `CORESCOPE_MQTT_BROKER`, `CORESCOPE_PORT`, etc.
2. **`/app/data/config.json`** — full config file (volume-mounted)
3. **Built-in defaults** — work out of the box

A sketch of the overlay logic follows the variable table below.

### Environment variables for common settings

| Variable | Default | Description |
|---|---|---|
| `CORESCOPE_MQTT_BROKER` | `mqtt://localhost:1883` | Primary MQTT broker URL |
| `CORESCOPE_MQTT_TOPIC` | `meshcore/+/+/packets` | MQTT topic pattern |
| `CORESCOPE_PORT` | `3000` | HTTP server port (internal) |
| `CORESCOPE_DB_PATH` | `/app/data/meshcore.db` | SQLite database path |
| `CORESCOPE_SITE_NAME` | `CoreScope` | Branding site name |
| `CORESCOPE_DEFAULT_REGION` | (none) | Default region filter |
| `DISABLE_MOSQUITTO` | `false` | Skip the internal MQTT broker |
| `DISABLE_CADDY` | `false` | Skip internal Caddy (when behind a reverse proxy) |
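
A sketch of the overlay in Go — the variable names match the table above, while the `Config` field names are assumptions about the server's existing struct:

```go
// Hedged sketch of the env-var overlay for config loading: highest
// priority (environment) wins over the JSON file and built-in defaults.
package config

import "os"

type Config struct {
	MQTTBroker string `json:"mqttBroker"`
	Port       string `json:"port"`
	DBPath     string `json:"dbPath"`
	SiteName   string `json:"siteName"`
}

// getenvDefault returns the env value if set, otherwise the fallback.
func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

// ApplyEnv overlays environment variables on whatever the JSON file
// (or built-in defaults) already provided.
func (c *Config) ApplyEnv() {
	c.MQTTBroker = getenvDefault("CORESCOPE_MQTT_BROKER", c.MQTTBroker)
	c.Port = getenvDefault("CORESCOPE_PORT", c.Port)
	c.DBPath = getenvDefault("CORESCOPE_DB_PATH", c.DBPath)
	c.SiteName = getenvDefault("CORESCOPE_SITE_NAME", c.SiteName)
}
```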

### Built-in defaults that work out of the box

The Go server and ingestor already have reasonable defaults compiled in. The only missing piece is **a default public MQTT source** so a fresh instance can see packets immediately. Options:

- **Option A:** Ship with the internal Mosquitto broker only (no external sources). The operator sees an empty dashboard and must configure MQTT. Safe but unhelpful.
- **Option B:** Ship with a public read-only MQTT source pre-configured (e.g., `mqtt.meshtastic.org` or equivalent if one exists for MeshCore). The operator sees live data immediately. Better UX.

**Recommendation:** Option A as default (safe), with a documented one-liner to add a public source. The config.example.json already shows how to add `mqttSources`.

## Compose Profiles

A single `docker-compose.yml` with profiles:

```yaml
services:
  corescope:
    image: ghcr.io/kpa-clawbot/corescope:v3.4.1
    # no compose "profiles" key — the profiles below are env var toggles
    ports:
      - "${HTTP_PORT:-80}:80"
    volumes:
      - ${DATA_DIR:-./data}:/app/data
    environment:
      - DISABLE_MOSQUITTO=${DISABLE_MOSQUITTO:-false}
      - DISABLE_CADDY=${DISABLE_CADDY:-false}
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3000/api/stats"]
      interval: 30s
      timeout: 5s
      retries: 3
    restart: unless-stopped
```

**Note:** Since the container already bundles mosquitto + caddy + server + ingestor via supervisord, "profiles" are really just env var toggles:

| Profile | DISABLE_MOSQUITTO | DISABLE_CADDY | Use case |
|---|---|---|---|
| **minimal** | `true` | `true` | External MQTT + external reverse proxy |
| **standard** (default) | `false` | `true` | Internal MQTT, no TLS (behind nginx/traefik) |
| **full** | `false` | `false` | Everything including Caddy auto-TLS |

This avoids splitting into separate compose services. The monolithic container is actually fine for this use case — it's a single-purpose appliance.

## One-Line Deploy

### Simplest (Docker run, no TLS)

```bash
docker run -d --name corescope \
  -p 80:80 \
  -v corescope-data:/app/data \
  -e DISABLE_CADDY=true \
  ghcr.io/kpa-clawbot/corescope:v3.4.1
```

### With Docker Compose

```bash
curl -sL https://raw.githubusercontent.com/Kpa-clawbot/CoreScope/master/docker-compose.simple.yml -o docker-compose.yml
docker compose up -d
```

Where `docker-compose.simple.yml` is a minimal 15-line file shipped in the repo.

## Update Path

```bash
docker compose pull
docker compose up -d
```

Or for `docker run` users:

```bash
docker pull ghcr.io/kpa-clawbot/corescope:v3.4.1
docker stop corescope && docker rm corescope
docker run -d --name corescope ... # same args as before
```

No rebuild. No git pull. No source code needed.

## Data Persistence

| Path | Content | Mount |
|---|---|---|
| `/app/data/meshcore.db` | SQLite database (all packets, nodes) | Required volume |
| `/app/data/config.json` | Custom configuration (optional) | Same volume |
| `/app/data/theme.json` | Custom theme (optional) | Same volume |
| `/data/caddy` | TLS certificates (Caddy-managed) | Named volume (automatic) |

**Backup:** `cp ~/corescope-data/meshcore.db ~/backup/` — it's just a SQLite file.

**Migration:** Existing `~/meshcore-data` directories work unchanged. Just point the volume at the same path.

## TLS/HTTPS

### Option 1: Caddy auto-TLS (built-in)

The container ships Caddy. To enable auto-TLS:

1. Mount a custom Caddyfile (docker run requires an absolute path for bind mounts, hence `$(pwd)`):

```bash
docker run -d \
  -p 80:80 -p 443:443 \
  -v corescope-data:/app/data \
  -v caddy-certs:/data/caddy \
  -v "$(pwd)/Caddyfile:/etc/caddy/Caddyfile:ro" \
  ghcr.io/kpa-clawbot/corescope:v3.4.1
```

2. Caddyfile:

```
your-domain.com {
    reverse_proxy localhost:3000
}
```

### Option 2: External reverse proxy (recommended for production)

Run with `DISABLE_CADDY=true` and put nginx/traefik/cloudflare in front. This is the standard approach and what most operators already have.

## Health Checks

Already implemented. The container health check hits `/api/stats`:

```bash
# From outside the container
curl -f http://localhost/api/stats

# Response includes packet counts, node counts, uptime
```

Docker will mark the container as `healthy`/`unhealthy` automatically.

## Monitoring

**Future (M5 from the RF health spec):** Expose a `/metrics` Prometheus endpoint with:

- `corescope_packets_total` — total packets ingested
- `corescope_nodes_active` — currently active nodes
- `corescope_mqtt_connected` — MQTT connection status
- `corescope_ingestor_lag_seconds` — time since last packet

This is not required for the deployment simplification work but should be designed alongside it.

## Migration from Current Setup

For existing operators using `manage.sh` + build-from-source:

1. **Keep your data directory** — the bind mount path is the same
2. **Keep your config.json** — it goes in the data directory as before
3. **Replace `docker compose build`** with `docker compose pull`
4. **Update docker-compose.yml** — change `build:` to `image: ghcr.io/kpa-clawbot/corescope:v3.4.1`
5. **manage.sh continues to work** — it wraps `docker compose` and will work with pre-built images

**Breaking changes:** None expected. The container interface (ports, volumes, env vars) stays the same.

## Milestones

### M1: Pre-built images (1–2 days)
- [ ] Create `.github/workflows/publish.yml` for multi-arch builds
- [ ] Push a test `v0.x.0` tag and verify the image on GHCR
- [ ] Update README with a `docker run` quickstart
- [ ] Create `docker-compose.simple.yml` (minimal compose file using the pre-built image)

### M2: Environment variable configuration (1 day)
- [ ] Add env var parsing to the Go server `config.go` (overlay on config.json)
- [ ] Add env var parsing to the Go ingestor
- [ ] Add `DISABLE_CADDY` support to `entrypoint-go.sh`
- [ ] Document all env vars in README

### M3: Sensible defaults (0.5 day)
- [ ] Ensure the server starts with zero config (no config.json required)
- [ ] Verify the ingestor connects to localhost MQTT by default
- [ ] Test: `docker run` with no config produces a working (empty) dashboard

### M4: Documentation + migration guide (0.5 day)
- [ ] Write operator-facing deployment docs in `docs/deployment.md`
- [ ] Migration guide for existing users
- [ ] One-page quickstart

**Total estimate:** 3–4 days of work.

## Torvalds Review

> "Is this over-engineered?"

The spec is intentionally simple. Key decisions:

1. **No Kubernetes manifests, Helm charts, or Terraform.** Just Docker.
2. **No config management system.** Env vars + an optional JSON file.
3. **Keep the monolithic container.** Splitting into 4 separate services (server, ingestor, mosquitto, caddy) would be "proper" microservices but is worse for operators who just want one thing to run. The supervisord approach is fine for an appliance.
4. **No custom CLI tool.** `docker compose` is the interface.
5. **Profiles are just env vars**, not separate compose files or services.

The simplest version is literally just M1: publish the existing image to GHCR. Everything else is polish. An operator can already `docker run` the image — they just can't `docker pull` it because it's not published anywhere.
@@ -0,0 +1,141 @@

# Movable UI Panels — Draggable Panel Positioning

**Status:** Proposed
**Related:** #279 (original request), PR #606 (collapsible panels — immediate fix)
**Date:** 2026-04-05

---

## Problem

The live map page overlays several UI panels on the map viewport: legend, live feed, node detail, and filters. On smaller screens or dense deployments, these panels obscure map content. Users have no control over where panels sit — they're CSS-fixed in corners, and when they collide with each other or with map data, the only option is to close them entirely. Closing a panel means losing access to the data it shows.

PR #606 addresses the immediate pain with collapsible panels and responsive breakpoints. This spec covers the next step: letting users reposition panels to wherever serves their workflow best.

## Solution

Panels become draggable within the map viewport. Users grab a handle, drag to a new position, release. Position persists in `localStorage` per panel ID. That's it.

### What each panel gets

| Affordance | Behavior |
|---|---|
| **Drag handle** | A subtle grip indicator (6-dot grid or `⋮⋮`) in the panel header. Cursor changes to `grab`/`grabbing`. The handle is the ONLY drag target — the panel body remains interactive (scrollable, clickable). |
| **Snap-to-edge** | When released within 20px of a viewport edge, the panel snaps flush to that edge. Prevents panels floating 3px from the side looking broken. |
| **Position persistence** | `localStorage` key per panel: `panel-pos-{id}` → `{ x, y }` as viewport percentages (not pixels — survives resize). |
| **Z-index on focus** | Clicking or dragging a panel brings it to front. Simple incrementing counter, reset on page load. |
| **Reset button** | Single button (in settings or as a map control) resets ALL panels to default positions. Clears all `panel-pos-*` keys. |

### What we do NOT build

- **Resizable panels.** Drag-to-resize adds complexity for marginal benefit. Panels have natural content-driven sizes.
- **Docking/tiling/splitting.** This is not a window manager. No snap-to-other-panel, no split view, no tiling grid.
- **Panel minimization to a taskbar.** Collapsible (PR #606) is sufficient.
- **Drag on mobile.** Touch-drag conflicts with map pan. Mobile keeps the collapsible behavior from PR #606. Draggable is desktop-only (`pointer: fine` media query).

## Design Considerations

### Drag handle affordance

The handle must be visible enough that users discover it, but not so prominent that it becomes visual noise. A 6-dot grip icon (`⋮⋮`) in the panel title bar, styled at 60% opacity, rising to 100% on hover. The cursor change (`grab` → `grabbing`) provides the primary affordance.

### Snap-to-edge

Panels snap to the nearest edge when released within a 20px threshold. Snap positions: top-left, top-right, bottom-left, bottom-right, or any edge midpoint. This prevents the "floating at 47px from the left" awkwardness without constraining users to a rigid grid.

### Position persistence

Positions are stored as viewport percentages: `{ xPct: 0.02, yPct: 0.15 }`. On window resize, panels stay proportionally positioned. If a resize would push a panel off-screen, clamp it to the nearest visible edge.

### Responsive breakpoints

Below the medium breakpoint (defined in PR #606), panels revert to their fixed/collapsible positions. The draggable behavior is a progressive enhancement for viewports wide enough to have meaningful repositioning space. Persisted positions are preserved in `localStorage` but not applied until the viewport is wide enough again.

### Z-index management

A module-level counter starting at 1000. Each panel interaction (click, drag start) sets that panel's z-index to `++counter`. On page load, the counter resets to 1000. No panel may exceed z-index 9999 (modal/overlay territory) — if the counter approaches that, compact all panel z-indices down.

### Accessibility

- Panels are focusable (`tabindex="0"` on the drag handle).
- Arrow keys reposition the focused panel by 10px per press (Shift+Arrow = 50px).
- `Escape` while dragging cancels and returns to the previous position.
- `Home` key resets the focused panel to its default position.
- Screen readers: `aria-label="Drag handle for {panel name}. Use arrow keys to reposition."` and `role="slider"` with `aria-valuenow` reflecting position.

## Implementation

### Milestones

**M1: Core drag mechanics** (~2 days)
- `DragManager` class: registers panels, handles pointer events, updates positions
- Snap-to-edge logic
- Z-index management
- No persistence yet — positions reset on reload

**M2: Persistence + reset** (~1 day)
- `localStorage` read/write for panel positions
- Reset-to-defaults button
- Viewport-percentage storage with resize clamping

**M3: Responsive + accessibility** (~1 day)
- Disable drag below the medium breakpoint
- Keyboard repositioning (arrow keys)
- ARIA attributes
- Screen reader announcements on position change

**M4: Polish + testing** (~1 day)
- Playwright E2E tests: drag, snap, persist, reset, keyboard
- Performance validation: drag must not trigger layout thrash (use `transform: translate()`, not `top`/`left`)
- Edge case handling (see below)

### Technical approach

- **No library.** Pointer events (`pointerdown`, `pointermove`, `pointerup`) with `setPointerCapture`. ~150 lines of vanilla JS.
- **CSS transforms for positioning.** `transform: translate(Xpx, Ypx)` avoids layout reflow during drag. Only write to `style.transform`, never `top`/`left`.
- **Debounce persistence.** Write to `localStorage` on `pointerup`, not during drag.
- **Single file:** `public/drag-manager.js` — imported by `live.js`, no other dependencies.

## Edge Cases

| Case | Handling |
|---|---|
| Panel dragged partially off-screen | Clamp to viewport bounds on `pointerup` |
| Window resized while panel is near edge | Re-clamp on `resize` (debounced 200ms) |
| Two panels overlap after drag | Allowed — z-index determines which is on top. Users can move them. |
| `localStorage` full or unavailable | Graceful fallback to default positions. No error shown. |
| Panel content changes size after drag | Panel stays at the dragged position; content reflows within. If the panel grows past a viewport edge, clamp. |
| User has old `localStorage` keys from a removed panel | Ignore unknown keys on load. Clean up stale keys on reset. |
| RTL layouts | Snap logic uses physical viewport edges, not logical start/end. Drag is inherently physical. |

## Expert Reviews

### Tufte (Information Design)

- **Draggability is justified** only if it serves data access — and here it does. Panels obscuring map data is a data-visibility problem, not a UI-decoration problem. Letting users clear their sightlines to the data is correct.
- **The drag handle must be minimal.** Six dots at 60% opacity is acceptable. Anything more prominent (colored bars, icons, labels) becomes chartjunk — UI chrome competing with data for attention.
- **Resist feature creep.** Resizable panels, docking zones, panel-to-panel snapping — all increase interface complexity without increasing data throughput. The spec correctly excludes these.
- **Snap-to-edge is good.** It prevents the visual noise of arbitrarily placed rectangles. Panels aligned to edges create clean negative space for the map data.

### Torvalds (Engineering Pragmatism)

- **This is borderline over-engineering.** The real question: do users actually need free-form drag, or would a simpler "pick a corner" toggle (TL/TR/BL/BR) cover 95% of use cases with 20% of the code?
- **The 4-corner toggle would be ~40 lines.** The full drag system is ~150+ lines plus persistence, snap logic, accessibility, resize handling, z-index management, and edge cases. That's a lot of surface area for "I want the legend on the right instead of the left."
- **Recommendation:** Ship the 4-corner toggle first (M0). If users actually request free-form drag after that, build it. YAGNI applies here.
- **If you do build drag:** the spec is sound. Pointer events + transforms + localStorage is the right stack. No library is correct. But test it on Firefox — pointer capture has quirks.

### Doshi (Product/Business)

- **This is an N (Nice-to-have), not an L (Leverage).** It improves UX for power users who spend hours on the live map, but it doesn't unlock new capabilities or new users.
- **Opportunity cost:** 5 developer-days on draggable panels is 5 days not spent on features that expand what CoreScope can do (new analytics, alerting, multi-site support).
- **The collapsible panels (PR #606) likely resolve the P1 pain.** Track whether users still complain about panel placement after #606 ships. If complaints drop to zero, this spec can stay on the shelf.
- **If built:** ship M1+M2 only (3 days). M3 accessibility can come later if adoption warrants it. M4 testing is non-negotiable.

### Feedback incorporated

Based on the reviews, the spec adds a **Milestone 0** recommendation:

**M0: Corner-position toggle** (~0.5 days)
Before building full drag, ship a simpler panel-position toggle: each panel's header gets a small button that cycles through TL → TR → BR → BL placement. Positions persist in `localStorage`. If this satisfies user needs, M1–M4 become unnecessary.

**Decision gate:** Ship M0 with PR #606 or shortly after. Monitor feedback for 2 weeks. If users request free-form repositioning, proceed to M1. If the corner toggle is sufficient, close this spec as "resolved by M0."
@@ -0,0 +1,489 @@

# Spec: RF Health Dashboard — Observer Radio Metrics

**Status:** Draft v3
**Purpose:** Enable operators to quickly identify RF jammers, deaf receivers, and radio health issues through per-observer time-series charts.

## Prerequisite Gate

**Before building anything, verify that stats messages arrive periodically from observers.**

The ingestor must receive radio stats messages at a predictable interval via MQTT. Confirmed: status messages arrive every ~5 minutes per observer.

**Verification steps (M0):**
1. Connect ≥3 observers to the MQTT bridge
2. Log all incoming stats messages with timestamps for 24h
3. Confirm messages arrive at a regular interval (expected: every few minutes)
4. If stats are NOT periodic, stop — a stats-request mechanism must be added to the MQTT bridge first (separate spec)
5. **Verify `triggerNoiseFloorCalibrate()` firing frequency.** If it fires on every stats cycle, noise floor readings may be artificially consistent (measuring calibration, not environment). If it fires only on boot, the first sample after a reboot is unreliable — document which behavior the firmware uses.

Do not proceed to M1 until this gate passes.

## Problem

Operators currently have no visibility into RF environment quality over time. A jammer could be active for hours before anyone notices degraded mesh performance. A deaf receiver silently drops packets with no alert. There's no way to distinguish "the mesh is quiet" from "my observer can't hear anything."

## Solution

A new Analytics tab ("RF Health") showing per-observer time-series charts for noise floor, TX airtime, RX airtime, and receive errors over configurable time windows (1h to 30d, plus a custom from/to range). Automated pattern detection (M3+) flags anomalies and suggests diagnoses once operators have used the raw charts and provided feedback.

## Data Model

### New table: `observer_metrics`

```sql
CREATE TABLE IF NOT EXISTS observer_metrics (
    observer_id  TEXT NOT NULL,
    timestamp    TEXT NOT NULL,  -- ISO 8601, rounded to nearest sample interval
    noise_floor  REAL,           -- dBm, from radio stats (nullable — may arrive without airtime)
    tx_air_secs  INTEGER,        -- cumulative TX seconds since boot (nullable)
    rx_air_secs  INTEGER,        -- cumulative RX seconds since boot (nullable)
    packets_sent INTEGER,        -- cumulative packets sent since boot (nullable)
    packets_recv INTEGER,        -- cumulative packets received since boot (nullable)
    recv_errors  INTEGER,        -- cumulative CRC/decode failures since boot (nullable)
    battery_mv   INTEGER,        -- battery voltage in millivolts (nullable, for field/solar nodes)
    PRIMARY KEY (observer_id, timestamp)
);
```

**Field notes:**

- **`recv_errors`** (CRC failure count) is the strongest single indicator of channel quality. A rising error rate with a stable noise floor points to in-band digital interference rather than broadband jamming. This is more diagnostic than a packet count alone.
- **`packets_sent` / `packets_recv`** are tracked separately because the ratio reveals asymmetric link problems (e.g., an observer can transmit but not receive, or vice versa). The old `packet_count` field conflated these.
- **`battery_mv`** is nullable and only relevant for field/solar deployments. Low battery causes erratic radio behavior (reduced TX power, missed RX windows) that looks like RF problems but isn't. Charting voltage alongside RF metrics prevents misdiagnosis.
- All cumulative counters (`tx_air_secs`, `rx_air_secs`, `packets_sent`, `packets_recv`, `recv_errors`) reset on reboot — see reboot handling below.

No additional indexes. The composite primary key covers all query patterns (per-observer time-range scans). At 70K rows, a full scan for any fleet-wide time query is fast enough.

### Clock source

**Always use the ingestor's wall clock for timestamps, not observer-reported timestamps.** Observer clocks may be wrong, drifted, or absent (no RTC). Round the ingestor wall clock to the nearest sample interval boundary (e.g., 5-minute marks) for consistent time alignment.

### Noise floor cold start caveat

**The first noise floor sample after a reboot may be unreliable.** The radio's noise floor reading requires settling time and may reflect calibration artifacts rather than the actual RF environment. Mark the first post-reboot sample with a `reboot` flag (see reboot handling) so the frontend can annotate it. Do not use first-post-reboot noise floor samples in baseline/median calculations.

### Sampling strategy

- **Interval:** Every 5 minutes (configurable via config.json `metrics.sampleIntervalSec`, default 300)
- **Source:** MQTT stats messages (`STATS_TYPE_RADIO`)
- **Insertion:** `INSERT OR REPLACE INTO observer_metrics (observer_id, timestamp, ...) VALUES (?, ?, ...)` with the timestamp rounded to the nearest interval boundary. No need to track last-insert time per observer — rounding + `INSERT OR REPLACE` is idempotent and naturally deduplicates (see the sketch below).
- **Storage:** ~10K rows/day for 35 observers — negligible at any reasonable retention.
- **Retention:** Configurable, default 30 days. Prune with a single `DELETE FROM observer_metrics WHERE timestamp < datetime('now', '-N days')` on startup and every 24h. Consider `PRAGMA auto_vacuum = INCREMENTAL` on embedded devices.
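
A minimal sketch of the idempotent write, showing only `noise_floor` for brevity (the real insert carries all columns):

```go
// Hedged sketch of the ingestor's sample write: round the wall clock to
// the interval boundary, then INSERT OR REPLACE so duplicate stats
// messages within one interval collapse to a single row.
package ingest

import (
	"database/sql"
	"time"
)

func writeSample(db *sql.DB, observerID string, noiseFloor float64,
	interval time.Duration) error {
	// Round to the nearest interval boundary (e.g., 5-minute marks).
	ts := time.Now().UTC().Round(interval).Format(time.RFC3339)
	_, err := db.Exec(
		`INSERT OR REPLACE INTO observer_metrics (observer_id, timestamp, noise_floor)
		 VALUES (?, ?, ?)`,
		observerID, ts, noiseFloor)
	return err
}
```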

### Gap detection

If the time between two consecutive samples for an observer exceeds 2× the sample interval (e.g., >10 minutes for a 5-min interval), insert null values in the response to indicate a gap. This prevents charts from drawing misleading interpolation lines across outages.

### Reboot handling

Cumulative counters (`tx_air_secs`, `rx_air_secs`, `packets_sent`, `packets_recv`, `recv_errors`) reset on device reboot. Detect counter resets (current value < previous value) and (see the delta sketch after this list):
1. Skip the delta computation for that interval (do not produce a negative value)
2. Log a reboot event for the observer with the timestamp
3. Use the current sample as the new baseline for subsequent deltas
4. **Include reboot timestamps in the API response** so the frontend can render them as annotations directly on the chart (see frontend design)
5. **Flag the first post-reboot noise floor sample** as potentially unreliable (cold start — see above)
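
A hedged sketch of the core reset-detection rule used by the server-side delta computation (the reboot-event logging and API annotation are omitted):

```go
// Hedged sketch of delta computation with reboot detection. The field
// semantics mirror the schema above; everything else is illustrative.
package metrics

// counterDelta converts a cumulative since-boot counter into a
// per-interval delta. A current value below the previous value means
// the device rebooted and the counter restarted from zero.
func counterDelta(prev, cur int64) (delta int64, reboot bool) {
	if cur < prev {
		// Reboot: skip this interval's delta (never emit a negative);
		// cur becomes the new baseline for the next sample.
		return 0, true
	}
	return cur - prev, false
}
```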

### Delta computation (server-side)

**Deltas are computed server-side, not in the frontend.** Cumulative counters are converted to per-interval rates before they leave the API, which returns percentage/rate values directly. This keeps firmware implementation details (cumulative counters, reboot semantics) out of the UI layer, reduces payload size, and centralizes the reboot-handling logic.

### Graceful degradation

Not all observers may report all metrics. If fields are absent:
- Store `NULL` for missing columns
- The API returns `null` for unavailable fields
- The frontend shows only the charts for which data exists — missing charts are hidden, not broken
- Status detection uses only available metrics
- `battery_mv` is expected to be absent on mains-powered observers — this is normal, not an error

Partial data is always better than no data. Never error or crash on missing optional fields.

### Required ingestor changes

1. Parse `tx_air_secs`, `rx_air_secs`, `packets_sent`, `packets_recv`, `recv_errors`, and `battery_mv` from MQTT stats messages (same pattern as the existing `noise_floor`)
2. On each stats message, round the ingestor wall clock to the nearest interval and `INSERT OR REPLACE` into `observer_metrics`
3. Handle missing fields gracefully (insert NULLs for absent metrics)
4. Detect counter resets and record reboot events
5. Add new columns to the `observers` table for current/latest values

### API endpoints

```
GET /api/observers/{id}/metrics?since=2026-04-04T00:00:00Z&until=2026-04-05T00:00:00Z&resolution=5m
```

The **`resolution` query parameter** controls downsampling:
- `5m` (default) — raw samples
- `1h` — hourly aggregates (`GROUP BY strftime('%Y-%m-%dT%H:00:00', timestamp)` with MIN/MAX/AVG)
- `1d` — daily aggregates

Use `1h` resolution for 7d views to avoid shipping 2,016 points per observer. This is essential for the fleet comparison view (35 observers × 2,016 = 70K points at raw resolution → 35 × 168 = 5,880 points at 1h resolution).
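
A sketch of the `1h` aggregation, matching the `GROUP BY strftime` approach above; the column list is abbreviated to the noise floor fields:

```go
// Hedged sketch of the hourly downsampling query the API would run for
// resolution=1h. Only noise_floor aggregates are shown.
package api

const hourlyQuery = `
SELECT strftime('%Y-%m-%dT%H:00:00Z', timestamp) AS bucket,
       AVG(noise_floor) AS noise_floor_avg,
       MIN(noise_floor) AS noise_floor_min,
       MAX(noise_floor) AS noise_floor_max
FROM observer_metrics
WHERE observer_id = ?
  AND timestamp >= ? AND timestamp < ?
GROUP BY bucket
ORDER BY bucket`
```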

Returns:
```json
{
  "observer_id": "1F445B...",
  "observer_name": "GY889 Repeater",
  "reboots": ["2026-04-04T03:15:00Z", "2026-04-04T18:22:00Z"],
  "metrics": [
    {
      "timestamp": "2026-04-04T00:00:00Z",
      "noise_floor": -112.5,
      "tx_airtime_pct": 2.1,
      "rx_airtime_pct": 8.3,
      "packets_sent": 42,
      "packets_recv": 342,
      "recv_errors": 3,
      "recv_error_rate": 0.87,
      "battery_mv": 3720,
      "is_reboot_sample": false
    }
  ]
}
```

Notes:
- `tx_airtime_pct` and `rx_airtime_pct` are server-computed deltas as percentages. Null if airtime data is unavailable.
- `recv_error_rate` = `recv_errors / (packets_recv + recv_errors)` as a percentage. Null if either field is unavailable.
- `packets_sent` and `packets_recv` are per-interval deltas (not cumulative). Null if unavailable.
- The `reboots` array contains timestamps of detected reboots within the queried window, for chart annotation.
- `is_reboot_sample` flags first-post-reboot samples where the noise floor may be unreliable.
- `battery_mv` is null for mains-powered observers.

```
GET /api/observers/metrics/summary?window=24h
```

**Fleet summary is cached incrementally.** Maintain a rolling summary struct in memory, updated on each new sample insert (35 observers × 1 sample/5 min ≈ 7 inserts/min — trivially cheap). The endpoint reads from the cached struct, not from SQLite queries on every request.
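
A hedged sketch of that cache — only two fields are shown; the real struct would mirror the JSON below:

```go
// Hedged sketch: incrementally maintained fleet summary, updated on
// each sample insert and read without touching SQLite.
package api

import "sync"

type ObserverSummary struct {
	CurrentNoiseFloor float64
	Status            string
}

type FleetCache struct {
	mu        sync.RWMutex
	observers map[string]ObserverSummary
}

func NewFleetCache() *FleetCache {
	return &FleetCache{observers: make(map[string]ObserverSummary)}
}

// OnSample is called by the ingestor after each insert (~7/min fleet-wide).
func (c *FleetCache) OnSample(id string, noiseFloor float64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	s := c.observers[id]
	s.CurrentNoiseFloor = noiseFloor
	c.observers[id] = s
}

// Snapshot backs GET /api/observers/metrics/summary — no SQLite query.
func (c *FleetCache) Snapshot() map[string]ObserverSummary {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]ObserverSummary, len(c.observers))
	for id, s := range c.observers {
		out[id] = s
	}
	return out
}
```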
|
||||
|
||||
Returns:
|
||||
```json
|
||||
{
|
||||
"observers": [
|
||||
{
|
||||
"observer_id": "1F445B...",
|
||||
"observer_name": "GY889 Repeater",
|
||||
"current_noise_floor": -112.5,
|
||||
"avg_noise_floor_24h": -114.2,
|
||||
"max_noise_floor_24h": -95.0,
|
||||
"tx_airtime_pct_24h": 2.1,
|
||||
"rx_airtime_pct_24h": 8.3,
|
||||
"recv_error_rate_24h": 0.87,
|
||||
"battery_mv": 3720,
|
||||
"status": "normal"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Frontend Design
|
||||
|
||||
### Design Principles
|
||||
|
||||
The dashboard exists for one purpose: **let an operator glance at it at 3 AM and know immediately if something is wrong.** Every design decision follows from this. Decoration that doesn't serve comprehension is removed. Data that can be shown is shown — not hidden behind clicks or hovers.
|
||||
|
||||
Key rules (per Tufte):
|
||||
- **Maximize data-ink ratio.** Every pixel must encode data or directly support reading it. Remove anything that doesn't.
|
||||
- **No chartjunk.** No gradient fills, no 3D effects, no decorative borders, no ornamental chrome.
|
||||
- **Labels on the data, not in legends.** Direct-label lines, annotate anomalies at the point they occur. The viewer should never look away from the data to understand it.
|
||||
- **Show data variation, not design variation.** All observer charts use identical scales, formats, and typography. If two charts look different, it's because the data is different.
|
||||
- **Respect the viewer's intelligence.** Dense, information-rich displays are fine. Oversimplified displays waste screen space and the operator's time.
|
||||
|
||||
### Page structure: small multiples grid
|
||||
|
||||
```
Analytics → RF Health tab
├── Time range: [1h] [3h] [6h] [12h] [24h] [3d] [7d] [30d] [Custom ▾]
│   ├── Presets: click to quick-set
│   └── Custom: two datetime inputs (from/to) with calendar picker
│       └── URL hash reflects selected range for deep linking
│
├── Small Multiples Grid (ALL observers, one cell per observer)
│   │
│   │   Each cell contains:
│   │   ┌─────────────────────────────────────────┐
│   │   │ GY889 Repeater   -112.5 dBm        3.7V │ ← name, current NF, battery (if field node)
│   │   │ ┈┈┈╲┈┈┈┈┈┈╱┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈ │ ← noise floor sparkline (24h)
│   │   │ err: 0.8%   TX: 2.1%   RX: 8.3%         │ ← key rates, inline text
│   │   │ ▲reboot 03:15                           │ ← reboot annotation (if any)
│   │   └─────────────────────────────────────────┘
│   │
│   │   Sorted by: worst status first, then highest noise floor
│   │   Grid: 3–4 columns on desktop, 2 on tablet, 1 on phone
│   │   Click any cell → expand to full detail below
│   │
│   └── Entire grid is visible at once — no pagination, no "show more"
│       (35 observers in 3 columns ≈ 12 rows × ~60px per cell ≈ 700px — fits on one screen)
│
├── Expanded Detail (shown below grid when a cell is clicked)
│   │
│   │   Up to four time-aligned charts, stacked vertically, sharing X-axis:
│   │
│   │   1. Noise Floor (dBm)
│   │      - SVG line chart, Y-axis inverted (higher dBm = worse = higher on chart)
│   │      - Thin reference lines at -100 dBm and -85 dBm, directly labeled
│   │        (e.g., "−100 warning" / "−85 critical") — no color bands
│   │      - Gaps (nulls) break the line — no interpolation across outages
│   │      - Reboot markers: vertical hairline at each reboot timestamp,
│   │        labeled "reboot" directly on the chart
│   │      - First-post-reboot sample marked with open circle (unreliable cold start)
│   │      - Direct labels on notable points (min, max, anomalies)
│   │
│   │   2. Airtime (%) — hidden if no airtime data
│   │      - Two separate SVG lines (NOT stacked area — stacked areas
│   │        make it impossible to read the lower series accurately)
│   │      - TX line and RX line, directly labeled at their endpoints
│   │        ("TX 2.1%" / "RX 8.3%") — no legend box
│   │      - Same X-axis as noise floor chart above
│   │      - Gaps shown as breaks
│   │
│   │   3. Channel Quality
│   │      - Receive error rate (%) as a line
│   │      - Packets recv as a light step-line for context
│   │      - Directly labeled — no legend
│   │      - High error rate + low packet count = dead channel
│   │      - High error rate + high packet count = interference
│   │
│   │   4. Battery Voltage (shown only if battery_mv is non-null)
│   │      - Simple line chart, mV scale
│   │      - Directly labeled with current value
│   │      - Useful for correlating RF anomalies with low-battery behavior
│   │
│   │   All four charts share the same X-axis and time range.
│   │   Reboot markers appear as vertical hairlines across ALL charts
│   │   (same event, visible in all contexts — no hunting).
│   │
│   └── Current values shown as text below charts:
│       NF: −112.5 dBm | TX: 2.1% | RX: 8.3% | Err: 0.87% | Batt: 3.72V
│       24h: avg −114.2 | max −95.0 | 3 reboots
│
└── Fleet Comparison (M4)
    ├── Small multiples of noise floor, one per observer, identical Y-scale
    ├── NOT an overlay chart — overlays become unreadable past 5 lines
    └── Use 1h resolution for 7d views
```

### Why small multiples, not expandable accordion

An accordion (expand/collapse per observer) forces the operator to click through each observer sequentially. At 3 AM with 35 observers, that's unacceptable. The small multiples grid shows ALL observers simultaneously — the eye does the comparison, not the mouse. Anomalies pop out visually because they break the pattern of the grid. This is Tufte's core insight: **small multiples leverage the viewer's ability to detect pattern breaks across a consistent visual template.**

### Why no color bands on charts

Color bands (green/yellow/red zones) are decorative — they add ink that doesn't encode data. They also pre-judge what's "good" and "bad," which varies by deployment environment. Instead, use **thin reference lines with direct text labels** at the warning and critical thresholds. The reference lines take up negligible ink, the labels are informational, and the operator's eye naturally compares the data line against them.

### Why not stacked area for airtime

Stacked area charts are a common source of graphical dishonesty. The bottom series (TX) reads correctly against the X-axis, but the top series (RX) reads against the TX boundary — making it impossible to accurately judge RX values without mental subtraction. Two separate lines, directly labeled, are always more honest and more readable.

### Color usage

Color encodes data category, never decoration:

- **Noise floor line:** single muted color (the line IS the data — it doesn't need to be loud)
- **TX / RX lines:** two distinct colors, directly labeled at endpoints (no legend needed)
- **Error rate:** a third distinct color
- **Reboot markers:** gray hairlines (de-emphasized — context, not data)
- **Status text in grid cells:** text color only (not background fill) — red text for critical, amber for warning, default for normal
- No background color fills on cards. No colored borders. No badge backgrounds. Color on text only where it carries meaning.

### Labels and annotations

- **Reference lines** at threshold values, labeled directly ("−100 dBm warning")
- **Reboot events** as vertical hairlines across all charts, labeled "reboot" at the top
- **Cold-start samples** marked with open circles and a subtle "?" annotation
- **Current values** as inline text on the sparkline cells and below detail charts
- **No separate legends.** Lines are labeled at their endpoints or directly on the chart.
- **Hover** shows exact timestamp + value — this is the only interactive element, and it reveals precision, not hidden data

### Data density

- The small multiples grid fits 35 observers in ~700px vertical space (one screen on desktop)
- Each cell is information-dense: name + current value + sparkline + rates + reboot count — all visible without clicking
- Detail charts are stacked vertically sharing the X-axis, eliminating redundant time labels
- No wasted whitespace between chart panels — they are a single visual unit

### Information hierarchy (3 AM glance test)

1. **Grid scan (2 seconds):** Are all sparklines flat and similar? Yes → everything's fine. One cell has a spike or red text → that's the problem.
2. **Cell read (3 seconds):** Which observer, what's the current NF, what's the error rate? All visible without clicking.
3. **Detail dive (10 seconds):** Click the cell, see time-series context, see if it correlates with reboots, check battery, check airtime.

An operator never needs to click anything to know if the fleet is healthy. Clicking only provides temporal detail for diagnosis.

### Mobile considerations

- Grid collapses to 1 column on phone (each cell is full-width, still showing sparkline + values)
- Detail charts fill the viewport width, Y-axis labels move above the chart to save horizontal space
- Touch targets: the entire grid cell is tappable (not a small icon)
- Time range selector uses segmented control (large touch targets) for presets, not a dropdown
- Custom range picker: two datetime inputs with calendar popup, positioned below the presets
- Selected range (preset or custom) persists in URL hash: `&range=24h` or `&from=2026-04-04T14:00:00Z&to=2026-04-04T16:00:00Z`

### Chart rendering

**Use SVG, not Canvas.** The existing analytics.js uses SVG for all charts (sparklines, bar charts, histograms). Canvas is only used for the force-directed neighbor graph. Follow the existing SVG patterns — reuse `sparkSvg()` for fleet overview sparklines.

2,016 SVG polyline points per chart (7 days at 5-minute sampling) is fine. For the fleet comparison view (M4), use hourly downsampling (168 points per observer) to avoid layout jank on mobile.

### Deep linking

```
#/analytics?tab=rf-health
#/analytics?tab=rf-health&observer=1F445B...&range=24h
```

## Pattern Detection (M3+)

**Pattern detection is deferred until after operators have used raw charts (M1–M2) and provided feedback on what patterns actually matter.** Do not implement automated diagnosis until real-world usage informs the rules.

### Planned automated diagnosis

The server computes a `status` field per observer based on the last N samples:

| Pattern | Status | Indicator |
|---|---|---|
| NF stable, RX/TX normal, low error rate | `normal` | (no indicator — absence of alarm is the signal) |
| NF spike + RX drop (broadband interference) | `jammer_suspected` | Red text: "Jammer?" |
| NF normal, RX near zero, fleet active (≥5 observers) | `deaf` | Red text: "Deaf receiver" |
| High `recv_errors` rate + stable NF | `digital_interference` | Amber text: "CRC errors high" |
| TX approaching duty cycle warning | `tx_overload` | Amber text: "TX overload" |
| No samples in >15 min | `offline` | Gray text: "Offline" |
| NF gradually increasing over hours | `interference_trend` | Amber text: "Rising interference" |
| Battery voltage below threshold | `low_battery` | Amber text: "Low battery" |

**Jammer detection logic:** A jammer raises the noise floor AND causes RX to drop (the receiver can't hear legitimate signals over the interference). NF spike + RX spike would indicate a legitimate busy channel, not a jammer. The key signal is: NF goes up, RX goes down.

**Digital interference detection (new):** High `recv_errors` with a stable noise floor indicates in-band digital interference (another protocol sharing the frequency, or a malfunctioning node transmitting garbage). This is distinct from broadband jamming, which raises the noise floor. `recv_errors` is the strongest single signal for this.

**Deaf detection:** Requires a minimum fleet size of ≥5 active observers to establish a meaningful fleet median. With fewer observers, skip deaf detection — the sample size is too small for comparison.

### Status priority

When multiple status conditions apply simultaneously, use this priority order (highest first); a sketch combining the detection rules with this ordering follows the list:

1. `offline` — no data trumps everything
2. `jammer_suspected` — active threat
3. `deaf` — hardware failure
4. `digital_interference` — channel quality issue
5. `tx_overload` — regulatory concern
6. `low_battery` — power issue causing RF symptoms
7. `interference_trend` — gradual degradation
8. `normal` — default

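The following is a minimal sketch of how the detection table and the priority ordering could combine into one classifier. The `Window` and `Thresholds` types, their field names, and the "RX drop at half the fleet median" cutoff are illustrative assumptions, not existing CoreScope code:

```go
package rfhealth

import "time"

// Window is a hypothetical per-observer summary of the last N samples.
type Window struct {
	LastSample    time.Time
	NoiseFloorDbm float64 // latest sample
	BaselineNF    float64 // rolling median, cold-start samples excluded
	RxCount       float64 // packets received in the window
	FleetMedianRx float64 // median RX count across active observers
	ActiveFleet   int     // observers with recent samples
	ErrRate       float64 // current recv_error_rate (%)
	ErrBaseline   float64 // 24h rolling average (%)
	TxDutyPct     float64
	BatteryMv     *int // nil if the observer reports no battery
}

// Thresholds mirrors the configurable rfHealth block shown later.
type Thresholds struct {
	SpikeAbsDbm  float64       // configurable absolute cutoff for spikes
	SpikeDeltaDb float64       // e.g. 15
	TxDutyWarn   float64       // e.g. 8
	DeafPct      float64       // e.g. 10 (% of fleet median)
	DeafMinFleet int           // e.g. 5
	Offline      time.Duration // e.g. 900s
	ErrWarnPct   float64       // e.g. 5
	LowBatteryMv int           // e.g. 3300
}

// Classify returns the highest-priority status per the ordering above.
// Trend detection (interference_trend) needs a fitted slope over hours
// and is omitted from this sketch.
func Classify(w Window, t Thresholds, now time.Time) string {
	if now.Sub(w.LastSample) > t.Offline {
		return "offline"
	}
	// A spike requires BOTH the absolute cutoff and the baseline delta.
	nfSpike := w.NoiseFloorDbm >= t.SpikeAbsDbm &&
		w.NoiseFloorDbm >= w.BaselineNF+t.SpikeDeltaDb
	// "RX drop" cutoff (half the fleet median) is an assumed heuristic.
	rxDrop := w.FleetMedianRx > 0 && w.RxCount < 0.5*w.FleetMedianRx
	if nfSpike && rxDrop {
		return "jammer_suspected" // NF up, RX down
	}
	if w.ActiveFleet >= t.DeafMinFleet && w.FleetMedianRx > 0 &&
		w.RxCount < t.DeafPct/100*w.FleetMedianRx {
		return "deaf" // hears almost nothing while the fleet is active
	}
	if !nfSpike && w.ErrRate >= t.ErrWarnPct && w.ErrRate > 2*w.ErrBaseline {
		return "digital_interference" // CRC errors high, NF stable
	}
	if w.TxDutyPct >= t.TxDutyWarn {
		return "tx_overload"
	}
	if w.BatteryMv != nil && *w.BatteryMv < t.LowBatteryMv {
		return "low_battery"
	}
	return "normal"
}
```
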
### Baseline computation

- **Baseline noise floor:** rolling median of last 24h, **excluding first-post-reboot samples** (cold start unreliable). Computed once on new sample arrival, cached — not recomputed per request.
- **Spike detection:** current sample exceeds an absolute threshold (configurable) AND exceeds baseline + spike delta (sketched below). Both conditions must be met — a delta-only threshold could false-positive in environments where the absolute NF is already benign (e.g., -115 dBm + 15 dBm = -100 dBm, which is fine).
- **"Others active" check for deaf detection:** compare this observer's RX packet count against the fleet median. If this observer is <10% of fleet median AND fleet has ≥5 active observers, flag as potentially deaf.
- **Error rate baseline:** rolling average of `recv_error_rate` over 24h. Spike above 2× baseline triggers `digital_interference` status.

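A minimal sketch of the cached baseline and the two-condition spike check, assuming parallel slices of the last 24h of samples (oldest first) with `coldStart` marking each first-post-reboot reading:

```go
package rfhealth

import "sort"

// baselineNF is the rolling median with cold-start samples excluded.
func baselineNF(samples []float64, coldStart []bool) (float64, bool) {
	vals := make([]float64, 0, len(samples))
	for i, v := range samples {
		if !coldStart[i] { // exclude first-post-reboot readings
			vals = append(vals, v)
		}
	}
	if len(vals) == 0 {
		return 0, false // no usable samples yet
	}
	sort.Float64s(vals)
	return vals[len(vals)/2], true
}

// isSpike requires BOTH conditions: exceeding the baseline delta alone
// is not enough while the absolute floor is still benign.
func isSpike(current, baseline, absCutoff, spikeDeltaDb float64) bool {
	return current >= absCutoff && current >= baseline+spikeDeltaDb
}
```
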
### Alert thresholds (configurable)

```json
{
  "rfHealth": {
    "noiseFloorWarning": -100,
    "noiseFloorCritical": -85,
    "spikeThresholdDb": 15,
    "txDutyCycleWarning": 8,
    "deafThresholdPct": 10,
    "deafMinFleetSize": 5,
    "offlineTimeoutSec": 900,
    "sampleIntervalSec": 300,
    "retentionDays": 30,
    "errorRateWarning": 5,
    "lowBatteryMv": 3300
  }
}
```

Note: No hardcoded duty cycle limit line on charts. Duty cycle regulations vary by jurisdiction (e.g., 1% in EU 868MHz, 10% in some US ISM bands). The warning threshold is configurable but no "regulatory limit" line is drawn on charts.

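For parsing, a hypothetical Go mirror of the block above could look like this; the struct is not yet in the codebase, and only the JSON tags are taken from the config keys:

```go
type RFHealthConfig struct {
	NoiseFloorWarning  float64 `json:"noiseFloorWarning"`
	NoiseFloorCritical float64 `json:"noiseFloorCritical"`
	SpikeThresholdDb   float64 `json:"spikeThresholdDb"`
	TxDutyCycleWarning float64 `json:"txDutyCycleWarning"`
	DeafThresholdPct   float64 `json:"deafThresholdPct"`
	DeafMinFleetSize   int     `json:"deafMinFleetSize"`
	OfflineTimeoutSec  int     `json:"offlineTimeoutSec"`
	SampleIntervalSec  int     `json:"sampleIntervalSec"`
	RetentionDays      int     `json:"retentionDays"`
	ErrorRateWarning   float64 `json:"errorRateWarning"`
	LowBatteryMv       int     `json:"lowBatteryMv"`
}
```
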
## Implementation Milestones

### M0: Prerequisite — Verify stats message frequency ✅ PASSED

- **Confirmed 2026-04-05:** Live MQTT capture on staging shows status messages arriving every ~5 minutes per observer
- **Fields confirmed present:** `noise_floor`, `tx_air_secs`, `rx_air_secs`, `recv_errors`, `battery_mv`, `uptime_secs`
- **Fields NOT yet parsed by ingestor:** `tx_air_secs`, `rx_air_secs`, `recv_errors` (`noise_floor` and `battery_mv` already parsed)
- **Ingestor timestamps:** Use ingestor wall clock, not observer timestamps (confirmed in design)
- **Verified:** `triggerNoiseFloorCalibrate()` fires every 2 seconds (`NOISE_FLOOR_CALIB_INTERVAL = 2000ms` in `Dispatcher.cpp`). Continuous calibration with 64 RSSI samples per cycle. Noise floor data is always fresh.
- **Gate: PASSED.** Proceed to M1.

### M1: Store metrics + small multiples grid (MVP)

- Create `observer_metrics` table with all columns (migration)
- Ingestor: parse all available fields from stats, `INSERT OR REPLACE` with rounded timestamps (sketched after this list)
- Handle missing fields gracefully (store NULLs)
- Detect counter resets and record reboot events
- Add `/api/observers/{id}/metrics` endpoint (all available fields)
- Add `/api/observers/metrics/summary` endpoint (cached incrementally)
- Add "RF Health" tab to Analytics
- **Small multiples grid** with sparklines and inline values for all observers
- Per-observer detail view: noise floor line chart with reference lines (not color bands), reboot markers as vertical hairlines, cold-start sample annotation
- Time range selector (1h/3h/6h/12h/24h/3d/7d/30d + custom range picker)
- Deep linking
- Retention pruning
- Tests: sampling, insertion idempotency, retention, API responses, gap handling, reboot detection

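A sketch of the idempotent insert, assuming a composite `(observer_id, ts)` primary key on `observer_metrics` and the 300s sample interval; columns beyond `noise_floor` are elided for brevity:

```go
import (
	"database/sql"
	"time"
)

// insertMetric rounds the ingest timestamp to the sample interval so a
// re-delivered stats message maps onto the same (observer_id, ts)
// primary key and replaces the row instead of duplicating it.
func insertMetric(db *sql.DB, observerID string, ts time.Time, nf sql.NullFloat64) error {
	rounded := ts.Truncate(300 * time.Second) // sampleIntervalSec = 300
	_, err := db.Exec(
		`INSERT OR REPLACE INTO observer_metrics (observer_id, ts, noise_floor)
		 VALUES (?, ?, ?)`,
		observerID, rounded.Unix(), nf) // nf is NULL when the field is missing
	return err
}
```
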
### M2: Airtime + channel quality charts

- Server-side delta computation for all cumulative counters with reboot handling and gap detection (see the sketch after this list)
- Add `resolution` query param for downsampling (1h, 1d)
- Airtime charts: two separate lines (TX/RX), directly labeled — not stacked area
- Channel quality chart: `recv_error_rate` line + `packets_recv` step-line
- Battery voltage chart (shown only when data exists)
- All charts time-aligned, sharing X-axis, reboot markers spanning all charts
- Tests: delta computation, reboot handling, counter reset, gap insertion, downsampling, error rate calculation

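The core of the delta computation is small; this minimal sketch covers the reboot case, where a cumulative counter like `tx_air_secs` decreasing means the observer restarted and the counter reset to zero:

```go
// counterDelta returns the increase since the previous sample. A counter
// that went backwards implies a reboot; the post-reboot value IS the
// delta accumulated since the reset.
func counterDelta(prev, curr uint64) (delta uint64, rebooted bool) {
	if curr >= prev {
		return curr - prev, false
	}
	return curr, true
}
```

Gap detection stays separate: when the time between two samples exceeds the expected interval, the delta spans the gap and should be rendered as a break, not attributed to a single bucket.
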
### M3: Pattern detection

- Implement after operators have used raw charts (M1–M2) and provided feedback
- Jammer detection (NF spike + RX drop)
- Digital interference detection (high `recv_errors` + stable NF)
- Deaf receiver detection (with ≥5 fleet minimum)
- Low battery detection
- Interference trend detection
- Status text indicators with priority ordering (no emoji badges — text only)
- Baseline computation (rolling median excluding cold-start samples, cached)
- Configurable alert thresholds
- Tests: each pattern, edge cases, status priority

### M4: Fleet comparison + advanced views

- Fleet comparison as **small multiples** (one noise floor chart per observer, identical Y-scale) — not overlay
- Sort/filter fleet by status, noise floor, error rate
- Optional: per-observer historical baseline trend
- Use 1h resolution for 7d views

### M5: Metrics export — Prometheus / Grafana / external systems

- **Prometheus endpoint:** `GET /metrics` exposing observer radio metrics in Prometheus exposition format
- Gauges per observer: `corescope_observer_noise_floor_dbm{observer="...",name="..."}`, `corescope_observer_tx_air_secs_total`, `corescope_observer_rx_air_secs_total`, `corescope_observer_recv_errors_total`, `corescope_observer_battery_mv`, `corescope_observer_uptime_secs`
- Fleet-level: `corescope_observers_total`, `corescope_observers_online`
- Packet counters: `corescope_packets_total`, `corescope_observations_total`
- Standard `process_*` and `go_*` runtime metrics via `promhttp` handler
- **Configurable:** Enable/disable via `config.json` (`metrics.prometheusEnabled: true`, `metrics.prometheusPath: "/metrics"`)
- **Auth:** Optional bearer token or basic auth on the metrics endpoint (prevents public scraping)
- **Labels:** Each observer metric labeled with `observer` (pubkey), `name` (friendly name), `region`
- **Why Prometheus format:** Industry standard, compatible with Grafana, Datadog, VictoriaMetrics, Mimir, and any OpenMetrics consumer. Operators who already run monitoring stacks can integrate CoreScope without any custom work.
- **Implementation:** Use the Go `prometheus/client_golang` library. Register collectors that read from the in-memory `PacketStore` and `observer_metrics` table. No additional polling — just expose current state on each scrape (see the sketch after this list).
- **Grafana dashboard template:** Ship a JSON dashboard template (`docs/grafana-dashboard.json`) that operators can import for instant RF health visualization in Grafana. Pre-configured panels matching the built-in RF Health tab.
- **OpenTelemetry (future):** If demand exists, add OTLP export alongside Prometheus. Not in M5 scope.

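A hedged sketch of the scrape-time collector using `prometheus/client_golang`: values are read from the store on each scrape, so nothing polls in the background. The `Observers()` accessor and the snapshot type are assumptions, not current `PacketStore` API; only the metric name and labels come from the plan above:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// ObserverSnapshot is a hypothetical read-only view of one observer.
type ObserverSnapshot struct {
	Pubkey, Name, Region string
	NoiseFloorDbm        float64
}

type rfCollector struct {
	store interface{ Observers() []ObserverSnapshot } // assumed accessor
	nf    *prometheus.Desc
}

func newRFCollector(s interface{ Observers() []ObserverSnapshot }) *rfCollector {
	return &rfCollector{
		store: s,
		nf: prometheus.NewDesc(
			"corescope_observer_noise_floor_dbm",
			"Current noise floor per observer (dBm).",
			[]string{"observer", "name", "region"}, nil),
	}
}

func (c *rfCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.nf }

// Collect runs on every scrape and emits one gauge per observer.
func (c *rfCollector) Collect(ch chan<- prometheus.Metric) {
	for _, o := range c.store.Observers() {
		ch <- prometheus.MustNewConstMetric(
			c.nf, prometheus.GaugeValue, o.NoiseFloorDbm,
			o.Pubkey, o.Name, o.Region)
	}
}
```

Registration happens once at startup with `prometheus.MustRegister(...)`, and the endpoint is served via `promhttp.Handler()` on the configured path.
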
## Design Decisions

1. **Per-observer, not per-device.** Even if two observers share hardware, their RF environments may differ (different antennas, channels). `observer_id` is already the natural key.
2. **Poll-on-tab-switch, not WebSocket push.** Data changes every 5 minutes. Users check this tab when investigating issues, not for live monitoring. WebSocket push adds complexity for no UX benefit.
3. **SVG charts.** Matches existing analytics.js patterns. Canvas only if fleet comparison proves too slow with SVG.
4. **Server-side deltas.** Keeps firmware details out of the frontend. Single point for reboot/gap handling logic.
5. **Incremental fleet summary cache.** 7 inserts/min is trivially cheap to process. No need to query SQLite on every summary request.
6. **No standalone timestamp index.** The composite PK handles all query patterns. A standalone index adds write amplification for no benefit.
7. **Ingestor wall clock for timestamps.** Observer clocks are unreliable. A consistent time source prevents alignment issues.
8. **Small multiples over accordion/cards.** Enables instant visual fleet comparison without clicking. Anomalies break the visual pattern of the grid. (Tufte: "Small multiples are the best design solution for a wide range of problems in data presentation.")
9. **Reference lines, not color bands.** Color bands add non-data ink and pre-judge thresholds. Reference lines are minimal and informational.
10. **Two lines, not stacked area for airtime.** Stacked areas make the upper series unreadable. Two lines with direct labels are always more honest.
11. **Text status indicators, not emoji badges.** Emoji badges are decorative chrome. Plain text with semantic color (red/amber/default) is higher data-ink ratio and more accessible.
12. **Reboot markers as cross-chart annotations.** Reboots affect all metrics simultaneously. Showing them as vertical hairlines across all charts prevents the operator from having to correlate events across separate views.
13. **Separate packets_sent/packets_recv.** The ratio reveals asymmetric link problems invisible in a combined count.
14. **recv_errors as a first-class metric.** CRC failures are the strongest channel quality signal — more diagnostic than noise floor alone for in-band interference.
15. **Exclude cold-start samples from baseline.** First-post-reboot noise floor readings may reflect calibration artifacts, not the RF environment. Including them would bias the baseline.

## Open Questions

1. **Multiple observers on same channel:** If two observers share a channel, their noise floors should correlate. Could be useful for validation but doesn't change the data model.
2. **EMA vs median for baseline:** Exponential moving average is cheaper (no sort) and smoother than median, but median is more robust against outliers. Decision deferred to M3.
3. **`triggerNoiseFloorCalibrate()` frequency:** Resolved in M0 — it fires every 2 seconds, i.e., continuous calibration. Readings are always fresh but may be artificially smoothed, which is one more reason to weight `recv_errors` heavily relative to noise floor for interference detection.
4. **Battery voltage thresholds:** 3.3V is a reasonable default for LiPo cells, but varies by chemistry and regulator. May need per-observer configuration.

@@ -0,0 +1,212 @@
# Startup Performance: Serve HTTP Within 2 Minutes on Any Database Size

## Problem

CoreScope takes 30–45 minutes to start on large databases (325K transmissions, 7.3M observations, 1.4GB SQLite). The HTTP server is completely unavailable during this time. Operators cannot restart without 30+ minutes of downtime.

### Where time goes (7.3M observation benchmark)

| Phase | Time | Blocking? |
|---|---|---|
| `Load()` — read SQLite → memory | ~90s | Yes |
| Build subpath index | ~20s | Yes |
| Build distance index | ~15s | Yes |
| Build path-hop index | <1s | Yes |
| Load neighbor edges from SQLite | <1s | Yes |
| **Backfill `resolved_path` for NULL observations** | **20–30+ min** | **Yes — the killer** |
| Re-pick best observations | ~10s | Yes |

The backfill calls `resolvePathForObs` for every observation with `resolved_path IS NULL`, then writes results back to SQLite and updates in-memory state. On first run (or after schema migration), this means resolving all 7.3M observations.

### Root cause

`backfillResolvedPaths()` in `neighbor_persist.go` runs synchronously in `main()` before `httpServer.ListenAndServe()`. It:

1. Collects all observations with `ResolvedPath == nil` under a read lock
2. Resolves paths (CPU-bound, ~millions of calls to `resolvePathForObs`)
3. Writes results to SQLite in a single transaction
4. Updates in-memory state under a write lock

Steps 2–4 block the main goroutine for 20–30 minutes.

## Solution: Async Chunked Backfill

### Design

Move `backfillResolvedPaths` out of the startup critical path. Start the HTTP server immediately after loading data and building indexes. Run backfill in a background goroutine with chunked processing that yields between batches.

### Startup sequence (new)

```
1. OpenDB, verify tables                       (~1s)
2. store.Load()                                (~90s)
3. ensureNeighborEdgesTable                    (<1s)
4. ensureResolvedPathColumn                    (<1s)
5. Load/build neighbor graph                   (<1s)
6. Build subpath/distance/path-hop indexes     (~35s)
7. pickBestObservation                         (~10s)
   (with whatever resolved_path data exists)
8. *** START HTTP SERVER *** — serving at ~2min mark
9. Background: backfillResolvedPaths           (20–30 min, non-blocking)
   → chunked, yields between batches
   → updates in-memory + SQLite incrementally
   → re-picks best obs for affected txs
```

Total time to first HTTP response: **~2 minutes** regardless of database size.

### Implementation details

#### 1. Background backfill goroutine

```go
// In main(), after starting HTTP server:
go func() {
	backfillResolvedPathsAsync(store, dbPath, 5000, 100*time.Millisecond)
}()
```

The async backfill processes observations in chunks of N (e.g., 5,000):

```go
func backfillResolvedPathsAsync(store *PacketStore, dbPath string, chunkSize int, yieldDuration time.Duration) {
	for {
		n := backfillResolvedPathsChunk(store, dbPath, chunkSize)
		if n == 0 {
			break // done
		}
		log.Printf("[store] backfilled resolved_path for %d observations (async)", n)
		time.Sleep(yieldDuration) // yield to HTTP handlers
	}
	log.Printf("[store] async resolved_path backfill complete")
}
```

Each chunk:

1. Takes a read lock, collects up to `chunkSize` pending observations, releases the lock
2. Resolves paths (no lock held — `resolvePathForObs` only reads immutable data)
3. Opens a separate RW SQLite connection, writes results in a transaction
4. Takes a write lock, updates in-memory `obs.ResolvedPath` and re-picks best obs for affected transmissions, releases the lock
5. Sleeps briefly to yield CPU/lock time to HTTP handlers

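A hedged sketch of one chunk, mapping the five steps above onto code. The store internals (`mu`, `pendingResolved`, `applyResolved`, `writeResolvedPaths`) and the `[]string` path type are illustrative names, not the real `PacketStore` API:

```go
func backfillResolvedPathsChunk(store *PacketStore, dbPath string, chunkSize int) int {
	// 1. Collect up to chunkSize pending observations under a read lock.
	store.mu.RLock()
	batch := store.pendingResolved(chunkSize) // obs with ResolvedPath == nil
	store.mu.RUnlock()
	if len(batch) == 0 {
		return 0 // nothing left; the async loop exits
	}

	// 2. Resolve lock-free: resolvePathForObs reads only data that is
	//    immutable after startup (prefix matcher, neighbor graph).
	resolved := make(map[*Observation][]string, len(batch))
	for _, obs := range batch {
		resolved[obs] = resolvePathForObs(obs)
	}

	// 3. Persist in one transaction on a separate RW connection.
	//    (Error handling elided; a lost write just means those rows are
	//    re-resolved after the next restart.)
	writeResolvedPaths(dbPath, resolved)

	// 4. Apply in memory under a write lock and re-pick best observations
	//    for the affected transmissions. Held only for the assignments.
	store.mu.Lock()
	store.applyResolved(resolved)
	store.mu.Unlock()

	// Step 5 (the yield) lives in the caller's loop above.
	return len(batch)
}
```
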
#### 2. Readiness flag and API degraded-mode header

Add a boolean to `PacketStore`:

```go
type PacketStore struct {
	// ...
	backfillComplete atomic.Bool
}
```

API responses include a header during backfill:

```
X-CoreScope-Status: backfilling
X-CoreScope-Backfill-Remaining: 4523000
```

After backfill completes:

```
X-CoreScope-Status: ready
```

The frontend can read this header and show a subtle banner: *"Resolving hop paths… some paths may show abbreviated pubkeys."*

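A sketch of the `net/http` middleware that stamps the status header (the remaining-count header would additionally need an atomic counter decremented per chunk, omitted here):

```go
func statusHeaders(store *PacketStore, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Read the atomic flag on every request; no locking needed.
		if store.backfillComplete.Load() {
			w.Header().Set("X-CoreScope-Status", "ready")
		} else {
			w.Header().Set("X-CoreScope-Status", "backfilling")
		}
		next.ServeHTTP(w, r)
	})
}
```
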
#### 3. Index rebuilds

The subpath, distance, and path-hop indexes are built during startup from whatever data exists. During backfill, newly resolved paths need to update these indexes incrementally.

Options (in order of preference):

**Option A: Defer index updates to end of backfill.** Indexes work fine with unresolved paths — they just produce slightly less precise results. After backfill completes, rebuild indexes once. Simple, correct, low risk.

**Option B: Incremental index updates per chunk.** After each chunk, update affected index entries. More complex, better real-time accuracy. Only worth it if index accuracy during backfill matters for production use.

**Recommendation: Option A.** The indexes are usable with unresolved paths. A single rebuild at the end (~35s) is cheap compared to the backfill duration. The API works throughout — results just improve after backfill finishes.

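In code, Option A amounts to sequencing the rebuild and the readiness flag after the backfill loop; the rebuild method names below are assumptions standing in for whatever the startup index builders are called:

```go
go func() {
	backfillResolvedPathsAsync(store, dbPath, 5000, 100*time.Millisecond)
	store.rebuildSubpathIndex()  // illustrative names
	store.rebuildDistanceIndex()
	store.rebuildPathHopIndex()
	store.backfillComplete.Store(true) // flip to "ready" only after rebuild
}()
```
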
#### 4. SQLite contention

The backfill opens a separate RW connection for writes. The main server uses a read-only connection for polling. SQLite WAL mode (already in use) allows concurrent readers and one writer. Contention risk is minimal:

- Write transactions are small (5,000 UPDATEs per chunk, batched in a single tx)
- Read queries from HTTP handlers are unaffected by WAL writes
- The 100ms yield between chunks prevents sustained write pressure

#### 5. Lock contention

The write lock is held only during the in-memory update phase of each chunk (~5,000 pointer assignments + re-picks). This takes microseconds. HTTP handlers acquire read locks for API responses — they will not be blocked for any perceptible duration.

#### 6. Frontend handling

The `hop-resolver.js` module already handles unresolved (prefix) hops gracefully — it shows abbreviated pubkeys. No frontend changes are required for correctness.

Optional enhancement: read the `X-CoreScope-Status` header and show a transient info banner during backfill. This is cosmetic and can be done in a follow-up.

### What about first-run specifically?

On first run with a pre-existing database (e.g., migrating from a version without `resolved_path`), ALL 7.3M observations need backfill. The async approach handles this identically — it just takes longer in the background while HTTP is already serving.

On subsequent restarts, `resolved_path` is already persisted in SQLite and loaded by `store.Load()`. The backfill loop finds zero pending observations and exits immediately.

### What about new observations during backfill?

The poller ingests new packets continuously. New observations written by the ingestor already have `resolved_path` set at ingest time (this is already implemented). The backfill only processes observations with `ResolvedPath == nil`, so there's no conflict with new data.

## Alternatives considered

### Lazy resolution (resolve on API access)

Resolve `resolved_path` only when an observation is accessed via API, cache the result.

**Rejected because:**

- Adds latency to every API call that touches unresolved observations
- Cache invalidation complexity (when does a cached resolution become stale?)
- Doesn't help with index accuracy — indexes still need full data
- The backfill is a one-time cost; lazy resolution makes it a recurring cost

### Progressive loading (recent data first)

Load only the last 24h into memory, start serving, load historical data in background.

**Rejected because:**

- Significantly more complex — all store operations need "is this data loaded yet?" checks
- Memory implications: need to track which time ranges are loaded
- Historical queries return wrong results during loading (not just degraded — wrong)
- The actual bottleneck is backfill, not `Load()`. Even loading all 7.3M observations takes only ~90s.

### Chunked blocking backfill (yield to HTTP between chunks, but keep in main startup)

Process N observations per tick with `runtime.Gosched()` between chunks, but still in `main()` before `ListenAndServe`.

**Rejected because:**

- HTTP still isn't available until all chunks complete
- Adds complexity without solving the core problem

## Carmack Review (Performance)

**The approach is sound.** Moving a 20–30 minute blocking operation to a background goroutine is the right call. Some notes:

1. **Chunk size tuning.** 5,000 is a reasonable starting point. Monitor: if write lock contention shows up in pprof (unlikely with microsecond hold times), reduce chunk size. If backfill is too slow, increase it or reduce yield time.

2. **Memory is not a concern.** The observations are already fully loaded in memory by `Load()`. The backfill only mutates the `ResolvedPath` field on existing objects — no additional memory allocation beyond temporary slices for the chunk.

3. **No hidden costs in `resolvePathForObs`.** It reads `nodePM` (a `PrefixMatcher`, immutable after startup) and `graph` (neighbor graph, immutable after startup). No locks needed during resolution. This is embarrassingly parallelizable if needed, but single-goroutine processing with chunking is sufficient.

4. **The index rebuild at the end is O(n) and takes ~35s.** This is a one-time cost after the first backfill. Not worth optimizing further unless the profile shows otherwise.

5. **Risk: `pickBestObservation` during backfill.** API responses may flip their "best" observation as resolved paths become available. This is cosmetically noisy but functionally correct. Document this as expected behavior.

6. **Future optimization if needed:** The backfill loop could be parallelized across multiple goroutines (partition observations by transmission hash). The resolution step is CPU-bound and read-only. This would reduce backfill wall time from 30 min to ~5 min on 8 cores. Not needed for MVP — the goal is HTTP availability, not backfill speed.

## Implementation plan

1. **Refactor `backfillResolvedPaths` into chunked async version** — new function `backfillResolvedPathsAsync` that processes in chunks and yields
2. **Move the backfill call in `main.go` out of the startup path** — launch it in a goroutine just before `ListenAndServe` blocks, so the server is serving while backfill runs
3. **Add `backfillComplete` atomic flag to `PacketStore`** — set after backfill finishes
4. **Add `X-CoreScope-Status` response header** — middleware reads the flag
5. **Rebuild indexes after backfill completes** — single call to rebuild subpath/distance/path-hop
6. **Tests:** unit test for chunked backfill (mock store with N unresolved obs, verify chunks process correctly)
7. **Frontend (follow-up):** optional banner during backfill state

Estimated effort: 1–2 hours for steps 1–5, plus tests.

@@ -0,0 +1,88 @@
# Analytics

The Analytics page provides deep-dive charts and tables about your mesh network. Select a tab to explore different aspects.

[Screenshot: analytics page with tab bar]

## Overview

Summary dashboard with key network metrics at a glance. Quick sparklines and counts across all data dimensions.

## RF / Signal

Radio frequency analysis:

- **SNR distribution** — histogram of signal-to-noise ratios across all packets
- **RSSI distribution** — histogram of received signal strength
- **SNR by observer** — which observers are getting the best signals
- **Signal trends** — how signal quality changes over time

Use this to identify weak links or noisy observers.

## Topology

Network structure analysis:

- **Hop count distribution** — how many relay hops packets typically take
- **Top relay nodes** — which repeaters handle the most traffic
- **Node connectivity** — how well-connected each node is

## Channels

Channel message statistics:

- **Messages per channel** — which channels are most active
- **Channel activity over time** — traffic trends by channel
- **Top senders** — most active nodes per channel

## Hash Stats

Mesh hash size analysis:

- **Hash size distribution** — how many bytes nodes use for addressing
- **Hash sizes by role** — do repeaters use different hash sizes than companions?

## Hash Issues

Potential hash collision detection:

- **Collision pairs** — nodes whose short hash prefixes overlap
- **Risk assessment** — how likely collisions are at current hash sizes

Hash collisions can cause packet misrouting. If you see collisions here, consider increasing hash sizes on affected nodes.

## Route Patterns (Subpaths)

Common routing paths through the mesh:

- **Frequent subpaths** — which relay chains appear most often
- **Path reliability** — how consistently each path is used
- **Path detail** — click a subpath to see every packet that used it

## Nodes

Per-node analytics with sortable metrics across the fleet.

## Distance

Estimated distances between nodes based on GPS coordinates, correlated with signal quality.

## Neighbor Graph

Interactive visualization of which nodes can directly hear each other. Shows the mesh topology as a network graph.

## RF Health

Per-observer signal health over time. Identifies observers with degrading reception.

## Prefix Tool

Test hash prefix lengths to see how many collisions different sizes would produce. Useful for deciding on hash_size settings.

## Region filter

All analytics tabs respect the **region filter** at the top. Select a region to scope the data to observers in that area.

## Deep linking

Each tab is deep-linkable. Share a URL like `#/analytics?tab=collisions` to point someone directly at hash issues.

@@ -0,0 +1,68 @@
# Channels

The Channels page shows decrypted MeshCore channel messages — like a group chat viewer for your mesh.

[Screenshot: channels page with message list]

## What are channels?

MeshCore nodes can send messages on named channels (like `#LongFast` or `#test`). These are group messages broadcast through the mesh. Any observer that hears the packet captures it.

CoreScope can decrypt and display these messages if you provide the channel encryption key.

## How it works

1. Observers capture encrypted channel packets from the mesh
2. CoreScope matches the packet's channel hash to a known channel name
3. If a decryption key is configured, the message content is decrypted and displayed
4. Without a key, you'll see the packet metadata but not the message text

## Viewing messages

Select a channel from the list on the left. Messages appear in chronological order on the right.

Each message shows:

- **Sender** — node name or hash
- **Text** — decrypted message content
- **Observer** — which observer captured it
- **Time** — when it was received

The message list auto-scrolls to show new messages as they arrive via WebSocket.

## Channel keys

To decrypt messages, add channel keys to your `config.json`:

```json
{
  "channelKeys": {
    "public": "8b3387e9c5cdea6ac9e5edbaa115cd72"
  }
}
```

The key name (e.g., `"public"`) is a label for your reference. The value is the 16-byte hex encryption key for that channel.

See [Configuration](configuration.md) for details on `channelKeys` and `hashChannels`.

## Hash channels

The `hashChannels` config lists channel names that CoreScope should try to match by hash:

```json
{
  "hashChannels": ["#LongFast", "#test", "#sf"]
}
```

CoreScope computes the hash of each name and matches incoming packets to identify which channel they belong to.

## Region filter

Channels respect the region filter. Select a region to see only messages captured by observers in that area.

## Tips

- The default MeshCore "public" channel key is well-known — most community meshes use it
- If messages appear but show garbled text, your key may be wrong
- Not all packets are channel messages — only type "Channel Msg" (GRP_TXT) appears here

@@ -0,0 +1,181 @@
# Configuration

CoreScope is configured via `config.json` in the server's working directory. Copy `config.example.json` to get started.

## Core settings

| Field | Default | Description |
|-------|---------|-------------|
| `port` | `3000` | HTTP server port |
| `apiKey` | — | Secret key for admin API endpoints (POST/PUT routes) |
| `dbPath` | `meshcore.db` | Path to the SQLite database file (optional) |

## MQTT

```json
"mqtt": {
  "broker": "mqtt://localhost:1883",
  "topic": "meshcore/+/+/packets"
}
```

The ingestor connects to this MQTT broker and subscribes to the topic pattern.

### Multiple MQTT sources

Use `mqttSources` for multiple brokers:

```json
"mqttSources": [
  {
    "name": "local",
    "broker": "mqtt://localhost:1883",
    "topics": ["meshcore/#"]
  },
  {
    "name": "remote",
    "broker": "mqtts://mqtt.example.com:8883",
    "username": "user",
    "password": "pass",
    "topics": ["meshcore/SJC/#"]
  }
]
```

## Branding

| Field | Description |
|-------|-------------|
| `branding.siteName` | Site title shown in the nav bar |
| `branding.tagline` | Subtitle on the home page |
| `branding.logoUrl` | URL to a custom logo image |
| `branding.faviconUrl` | URL to a custom favicon |

## Theme

Colors used throughout the UI. All values are hex color codes.

| Field | Description |
|-------|-------------|
| `theme.accent` | Primary accent color (links, buttons) |
| `theme.navBg` | Navigation bar background |
| `theme.navBg2` | Secondary nav background |
| `theme.statusGreen` | Healthy status color |
| `theme.statusYellow` | Degraded status color |
| `theme.statusRed` | Silent/error status color |

See [Customization](customization.md) for the full list — the theme customizer exposes every color.

## Node colors

Default marker colors by role:

```json
"nodeColors": {
  "repeater": "#dc2626",
  "companion": "#2563eb",
  "room": "#16a34a",
  "sensor": "#d97706",
  "observer": "#8b5cf6"
}
```

## Health thresholds

How long (in hours) before a node is marked degraded or silent:

| Field | Default | Description |
|-------|---------|-------------|
| `healthThresholds.infraDegradedHours` | `24` | Repeaters/rooms → degraded after this many hours |
| `healthThresholds.infraSilentHours` | `72` | Repeaters/rooms → silent after this many hours |
| `healthThresholds.nodeDegradedHours` | `1` | Companions/others → degraded |
| `healthThresholds.nodeSilentHours` | `24` | Companions/others → silent |

## Retention

| Field | Default | Description |
|-------|---------|-------------|
| `retention.nodeDays` | `7` | Nodes not seen in N days move to inactive |
| `retention.packetDays` | `30` | Packets older than N days are deleted daily |

## Channel decryption

| Field | Description |
|-------|-------------|
| `channelKeys` | Object of `"label": "hex-key"` pairs for decrypting channel messages |
| `hashChannels` | Array of channel names (e.g., `"#LongFast"`) to match by hash |

See [Channels](channels.md) for details.

## Map defaults

```json
"mapDefaults": {
  "center": [37.45, -122.0],
  "zoom": 9
}
```

Initial map center and zoom level.

## Regions

```json
"regions": {
  "SJC": "San Jose, US",
  "SFO": "San Francisco, US"
}
```

Named regions for the region filter dropdown. The `defaultRegion` field sets which region is selected by default.

## Cache TTL

All values in seconds. Controls how long the server caches API responses:

```json
"cacheTTL": {
  "stats": 10,
  "nodeList": 90,
  "nodeDetail": 300,
  "analyticsRF": 1800
}
```

Lower values = fresher data but more server load.

## Packet store

| Field | Default | Description |
|-------|---------|-------------|
| `packetStore.maxMemoryMB` | `1024` | Maximum RAM for in-memory packet store |
| `packetStore.estimatedPacketBytes` | `450` | Estimated bytes per packet (for memory budgeting) |

## Timestamps

| Field | Default | Description |
|-------|---------|-------------|
| `timestamps.defaultMode` | `"ago"` | Display mode: `"ago"` (relative) or `"absolute"` |
| `timestamps.timezone` | `"local"` | `"local"` or `"utc"` |
| `timestamps.formatPreset` | `"iso"` | Date format preset |

## Live map

| Field | Default | Description |
|-------|---------|-------------|
| `liveMap.propagationBufferMs` | `5000` | How long to buffer observations before animating |

## HTTPS

```json
"https": {
  "cert": "/path/to/cert.pem",
  "key": "/path/to/key.pem"
}
```

Provide cert and key paths to enable HTTPS.

## Home page

The `home` section customizes the onboarding experience. See `config.example.json` for the full structure including `steps`, `checklist`, and `footerLinks`.

@@ -0,0 +1,78 @@
# Customization

CoreScope includes a built-in theme customizer. Access it from **Tools → Customization** in the navigation menu.

[Screenshot: theme customizer panel with color pickers]

## What you can customize

### Branding

- **Site name** — displayed in the nav bar and browser tab
- **Tagline** — shown on the home page
- **Logo URL** — replace the default logo
- **Favicon URL** — custom browser tab icon

### Theme colors (Light & Dark)

Every color in the UI is customizable:

- **Accent** — primary color for links, buttons, highlights
- **Navigation** — nav bar background, text, and muted text colors
- **Background** — page background and content area
- **Surfaces** — cards, panels, input fields, detail panes
- **Status** — green (healthy), yellow (degraded), red (silent)
- **Text** — primary text, muted text, borders
- **Tables** — row stripe, hover, and selected row colors

Both light and dark themes are independently configurable.

### Node colors

Set the color for each role: repeater, companion, room, sensor, observer. These colors appear on the map, in node badges, and throughout the UI.

### Packet type colors

Customize the color for each packet type: Advert, Channel Msg, Direct Msg, ACK, Request, Response, Trace, Path.

### Home page

Customize the onboarding experience:

- Hero title and subtitle
- Getting-started steps (emoji, title, description for each)
- FAQ items
- Footer links

### Timestamps

- **Display mode** — relative ("5 min ago") or absolute
- **Timezone** — local or UTC
- **Format preset** — ISO or other presets

## Live preview

Changes apply instantly as you edit. You see the result in real time without saving.

## Exporting a theme

Click **Export JSON** to download your customizations as a JSON file. This produces a config-compatible block you can paste into your `config.json`.

## Importing a theme

Click **Import JSON** and paste a previously exported theme. The customizer loads all values and applies them immediately.

## Resetting

Click **Reset to Defaults** to restore all settings to the built-in defaults.

## How it works

The customizer writes CSS custom properties (variables) to override the defaults. Exported JSON maps directly to the `theme`, `nodeColors`, `branding`, and `home` sections of [config.json](configuration.md).

## Tips

- Start with the accent color — it cascades through buttons, links, and highlights
- Dark mode has its own color set (`themeDark`), independent of light mode
- Node colors affect the [Map](map.md), [Live](live.md) page, and node badges everywhere
- Export your theme before upgrading CoreScope, then re-import it after

@@ -0,0 +1,54 @@
# FAQ

## 1. How do I add my node to CoreScope?

Go to the **Home** page, search for your node by name or public key, and click **+ Claim**. Your node appears on the dashboard with live status.

## 2. Why does my node show as "Silent"?

Your node hasn't been heard by any observer within the configured threshold. For companions, the default is 24 hours. For repeaters, it's 72 hours. Check that your node is advertising and within range of an observer. See [Configuration](configuration.md) for threshold settings.

## 3. What's the difference between "Last seen" and "Last heard"?

**Last seen** updates only when a node sends an advertisement. **Last heard** updates on *any* traffic from that node. CoreScope uses whichever is more recent for status calculations.

## 4. Why can't I read channel messages?

You need the channel encryption key in your `config.json`. See [Channels](channels.md) for how to configure `channelKeys`.

## 5. What do the packet types mean?

| Type | Meaning |
|------|---------|
| Advert | Node announcing itself to the mesh |
| Channel Msg | Group message on a named channel |
| Direct Msg | Private message between two nodes |
| ACK | Acknowledgment of a received packet |
| Request | Query sent to the mesh |
| Response | Reply to a request |
| Trace | Route tracing packet |
| Path | Path discovery/announcement |

## 6. How do I filter packets by a specific node?

On the [Packets](packets.md) page, use the filter bar and type `from:NodeName`, or click a node's name anywhere in the UI to jump to its packets.

## 7. Why do some nodes appear faded on the map?

Faded markers indicate **stale** nodes — they haven't been heard recently. The threshold depends on the node's role.

## 8. Can I run CoreScope without MQTT?

Yes. You can POST packets directly to the `/api/packets` endpoint using the API key. However, MQTT is the standard way to ingest data from mesh observers.

## 9. How do I change the map's default location?

Set `mapDefaults.center` and `mapDefaults.zoom` in your `config.json`. See [Configuration](configuration.md).

## 10. How do I share a link to a specific packet or view?

CoreScope uses URL hashes for deep linking. Copy the URL from your browser — it includes the current page, filters, and selected items. Examples:

- `#/packets/abc123` — a specific packet
- `#/analytics?tab=collisions` — the hash issues tab
- `#/nodes/pubkey123` — a specific node's detail page

@@ -0,0 +1,70 @@
# Getting Started

## What is CoreScope?

CoreScope is a web-based analyzer for **MeshCore LoRa mesh networks**. It shows you every node, packet, and signal path in your mesh — in real time.

Use it to monitor node health, debug connectivity, view decrypted channel messages, and understand how your mesh is performing.

## What you need

- A running CoreScope server (Go binary + SQLite database)
- An MQTT broker feeding mesh packets into the CoreScope ingestor
- A modern web browser

## Quick start

### 1. Configure

Copy `config.example.json` to `config.json` and edit it:

```json
{
  "port": 3000,
  "apiKey": "pick-a-secret-key",
  "mqtt": {
    "broker": "mqtt://your-broker:1883",
    "topic": "meshcore/+/+/packets"
  }
}
```

See [Configuration](configuration.md) for all options.

### 2. Run

Start both the ingestor (reads MQTT → writes to SQLite) and the server (serves the UI + API):

```bash
./corescope-ingestor &
./corescope-server
```

### 3. Open the UI

Go to `http://localhost:3000`. You'll see the **Home** page.

- **New to MeshCore?** Choose "I'm new" for setup guides and tips.
- **Already set up?** Choose "I know what I'm doing" to jump straight in.

Search for your node by name or public key, then click **+ Claim** to add it to your personal dashboard.

## What's on each page

| Page | What it does |
|------|-------------|
| [Home](getting-started.md) | Your personal mesh dashboard — claimed nodes, health, stats |
| [Nodes](nodes.md) | Browse all nodes with status, role, and filters |
| [Packets](packets.md) | Inspect every packet — grouped or raw, with hex breakdown |
| [Map](map.md) | See node locations on a live map |
| [Live](live.md) | Watch packets flow in real time with map animations |
| [Analytics](analytics.md) | Deep-dive charts: RF, topology, routes, hash stats |
| [Channels](channels.md) | Read decrypted channel messages |

## Home page features

- **Claim nodes** — search and add nodes to "My Mesh" for at-a-glance status cards
- **Node cards** — show status (🟢 Active / 🟡 Degraded / 🔴 Silent), SNR, hops, packet count, and 24h sparkline
- **Health detail** — click a card to see full health: observers, recent packets, mini map
- **Packet journey** — click a recent packet to see sender → observer flow
- **Network stats** — total transmissions, nodes, observers, and 24h activity

@@ -0,0 +1,76 @@
# Live

The Live page shows packets flowing through your mesh in real time, with animated map visualizations.

[Screenshot: live page with map animations and packet feed]

## Real-time feed

Packets appear as they arrive via WebSocket. Each entry shows:

- Packet type icon and color
- Sender name
- Observer that captured it
- SNR and hop count
- Timestamp

The feed scrolls automatically. New packets appear at the top.

## Map animations

When a packet arrives, the Live map animates the signal path:

- A pulse appears at the sender's location
- Lines animate from sender to each observer that heard the packet
- Observer markers flash briefly on reception

### Realistic propagation

Enable **Realistic Propagation** in the controls to buffer observations of the same packet and animate them simultaneously — showing how a single transmission ripples through the mesh.

### Ghost hops

When enabled, intermediate relay hops are shown as faded markers even if they don't have known locations. Disable to show only nodes with GPS coordinates.

## VCR mode

The Live page has a built-in VCR (video cassette recorder) for packet replay.

| Button | Action |
|--------|--------|
| ⏸ Pause | Freeze the feed. New packets are buffered but not displayed. |
| ▶ Play | Resume live feed or start replay. |
| ⏪ Rewind | Step backward through packet history. |
| ⏩ Fast-forward | Replay at 2×, 4×, or 8× speed. |

While paused, a badge shows how many packets arrived that you haven't seen yet.

## Timeline

The timeline bar at the bottom shows packet activity over the selected time scope (default: 1 hour). Click anywhere on the timeline to jump to that point in time.

## Packet type legend

Each packet type has a color and icon:

| Type | Icon | Color |
|------|------|-------|
| Advert | 📡 | Green |
| Channel Msg | 💬 | Blue |
| Direct Msg | ✉️ | Amber |
| ACK | ✓ | Gray |
| Request | ❓ | Purple |
| Response | 📨 | Cyan |
| Trace | 🔍 | Pink |
| Path | 🛤️ | Teal |

## Controls

- **Favorites only** — show only packets from your claimed nodes
- **Matrix mode** — visual effect overlay (just for fun)

## Tips

- Use VCR pause when you spot something interesting — then step through packet by packet
- Realistic propagation mode is best for understanding multi-path reception
- The timeline sparkline shows traffic patterns — useful for spotting quiet periods or bursts

@@ -0,0 +1,71 @@
# Map
|
||||
|
||||
The Map page shows all nodes on an interactive map, color-coded by role.
|
||||
|
||||
[Screenshot: map with colored markers and controls panel]
|
||||
|
||||
## Marker shapes and colors
|
||||
|
||||
Each node role has a distinct shape and color:
|
||||
|
||||
| Role | Shape | Default Color |
|
||||
|------|-------|---------------|
|
||||
| Repeater | Diamond | Red |
|
||||
| Companion | Circle | Blue |
|
||||
| Room | Square | Green |
|
||||
| Sensor | Triangle | Orange |
|
||||
| Observer | Star | Purple |
|
||||
|
||||
Stale nodes (not heard recently) appear faded.
|
||||
|
||||
## Hash labels
|
||||
|
||||
Repeaters can display their short mesh hash ID instead of a plain marker. Toggle **Hash Labels** in the map controls to switch between icon markers and hash-labeled markers.
|
||||
|
||||
## Map controls
|
||||
|
||||
Open the controls panel with the ⚙️ button (top-right corner).
|
||||
|
||||
### Node types
|
||||
|
||||
Check or uncheck roles to show/hide them on the map. All roles are visible by default.
|
||||
|
||||
### Byte size filter
|
||||
|
||||
Filter nodes by packet size category: All, Small, Medium, Large.
|
||||
|
||||
### Status filter
|
||||
|
||||
Show only active, degraded, or silent nodes.
|
||||
|
||||
### Last heard filter
|
||||
|
||||
Limit the map to nodes heard within a time window (e.g., 24h, 7d, 30d).
|
||||
|
||||
### Clustering
|
||||
|
||||
Enable clustering to group nearby nodes into cluster bubbles. Zoom in to expand clusters.
|
||||
|
||||
### Neighbor filter
|
||||
|
||||
Select a reference node to highlight only its direct neighbors.
|
||||
|
||||
## Show Route
|
||||
|
||||
Click a node marker, then click **Show Route** in the popup to see the paths packets take to reach that node. Routes are drawn as lines between nodes.
|
||||
|
||||
## Popups
|
||||
|
||||
Click any marker to see:
|
||||
|
||||
- Node name and role
|
||||
- Public key
|
||||
- Last seen timestamp
|
||||
- Link to the full node detail page
|
||||
|
||||
## Tips
|
||||
|
||||
- Zoom in on dense areas to see individual nodes
|
||||
- Use the role checkboxes to isolate repeaters and understand coverage
|
||||
- The neighbor filter is great for seeing which nodes can directly hear each other
|
||||
- Node colors are [customizable](customization.md) in the theme settings
|
||||
@@ -0,0 +1,70 @@
# Nodes

The Nodes page lists every node your mesh has seen — repeaters, companions, rooms, and sensors.

[Screenshot: nodes list with status indicators]

## What you see

Each row shows:

- **Name** — the node's advertised name (or public key if unnamed)
- **Role** — Repeater, Companion, Room, or Sensor
- **Status** — color-coded health indicator
- **Last seen** — when the node was last heard
- **Advert count** — how many advertisements this node has sent

## Status indicators

| Indicator | Meaning |
|-----------|---------|
| 🟢 Active | Heard recently (within threshold for its role) |
| 🟡 Degraded | Not heard for a while but not yet silent |
| 🔴 Silent | Not heard for an extended period |

Thresholds differ by role. Infrastructure nodes (repeaters, rooms) have longer grace periods than companions. See [Configuration](configuration.md) for `healthThresholds`.
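
As a rough illustration, status is just the age of the node's last packet compared against two per-role cutoffs. A minimal sketch — the threshold values below are illustrative placeholders, the real ones come from `healthThresholds`:

```js
// Sketch of role-based status classification. Threshold values are
// illustrative placeholders, not the shipped defaults.
const THRESHOLDS = {
  repeater:  { degradedMs: 6 * 3600000, silentMs: 48 * 3600000 },
  companion: { degradedMs: 1 * 3600000, silentMs: 6 * 3600000 }
};

function classifyNode(role, lastHeardMs) {
  const th = THRESHOLDS[role] || THRESHOLDS.companion;
  const age = lastHeardMs ? Date.now() - lastHeardMs : Infinity;
  if (age < th.degradedMs) return 'active';   // 🟢
  if (age < th.silentMs)   return 'degraded'; // 🟡
  return 'silent';                            // 🔴
}
```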
## Filtering

### Role tabs

Click **All**, **Repeaters**, **Rooms**, **Companions**, or **Sensors** to filter by role.

### Search

Type in the search box to filter by name or public key. The filter applies instantly.

### Status filter

Filter to show only active, degraded, or silent nodes.

### Last heard filter

Filter nodes by how recently they were heard (e.g., last hour, last 24h).

## Sorting

Click any column header to sort. Click again to reverse the order. Your sort preference is saved across sessions.

## Node detail

Click a node row to open the **detail pane** on the right. It shows:

- Full public key
- Role and status explanation
- Location (if known)
- Recent packets involving this node
- Neighbor nodes
- Signal statistics

Click the node name in the detail pane to open the **full node page** with complete history, analytics, and health data.

## Favorites

Nodes you've claimed on the Home page appear as favorites. You can also star nodes directly from the Nodes page.

## Tips

- Use the search box for quick lookups — it matches partial names and keys
- Sort by "Last seen" descending to find the most active nodes
- The status explanation tells you exactly why a node is marked degraded or silent
@@ -0,0 +1,78 @@
# Packets

The Packets page shows every transmission captured by your mesh observers.

[Screenshot: packets table with grouped view]

## Grouped vs ungrouped view

By default, packets are **grouped by hash**. Each row represents one unique transmission, with a count of how many observers heard it.

Click **Ungroup** to see every individual observation as its own row.

Click the **▶** arrow on a grouped row to expand it and see all observations of that packet.
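
Conceptually, grouping is a reduce over individual observations keyed by packet hash. A minimal sketch — field names like `hash` and `snr` are illustrative, not the server's exact schema:

```js
// Illustrative grouping of observations into one row per unique packet.
function groupByHash(observations) {
  const groups = new Map();
  for (const obs of observations) {
    if (!groups.has(obs.hash)) groups.set(obs.hash, { hash: obs.hash, observations: [] });
    groups.get(obs.hash).observations.push(obs);
  }
  for (const g of groups.values()) {
    g.observations.sort((a, b) => b.snr - a.snr); // best signal first (see "Observation sorting")
    g.count = g.observations.length;              // drives the per-row observer count
  }
  return [...groups.values()];
}
```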
## What each row shows

- **Time** — when the packet was received
- **From** — sender node name or hash prefix
- **Type** — packet type (Advert, Channel Msg, Direct Msg, ACK, Request, Response, Trace, Path)
- **Observer** — which observer captured the packet
- **SNR** — signal-to-noise ratio in dB
- **RSSI** — received signal strength
- **Hops** — how many relay hops the packet took

## Filters

### Observer filter

Select a specific observer to see only packets it captured. Saved across sessions.

### Type filter

Filter by packet type (e.g., show only Adverts or Channel Messages).

### Time window

Choose how far back to look: 15 minutes, 1 hour, 6 hours, 24 hours, etc. On mobile, the window is capped at 3 hours for performance.

### Wireshark-style filter bar

Type filter expressions for advanced filtering:

```
type:advert snr>5 hops<3
from:MyNode observer:SJC
```

See the filter bar's help tooltip for all supported fields and operators.
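
To make the expression syntax concrete: each whitespace-separated token decomposes into a field, an operator, and a value. The sketch below is a hedged illustration of that decomposition, not the analyzer's actual parser:

```js
// Hedged sketch: tokenize expressions like "type:advert snr>5 hops<3".
// The real filter bar may support more fields and operators than shown.
function parseFilter(expr) {
  return expr.trim().split(/\s+/).map(token => {
    const m = token.match(/^(\w+)(:|>=|<=|>|<|=)(.+)$/);
    if (!m) throw new Error('Bad filter token: ' + token);
    return { field: m[1], op: m[2] === ':' ? '=' : m[2], value: m[3] };
  });
}

// parseFilter('type:advert snr>5')
// → [ { field: 'type', op: '=', value: 'advert' },
//     { field: 'snr',  op: '>', value: '5' } ]
```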
## Packet detail

Click any row to open the **detail pane** on the right showing:

- Full packet metadata (hash, type, size, timestamp)
- Decoded payload fields
- Hop path with resolved node names
- All observers that heard this packet, sorted by SNR

### Hex breakdown

The detail pane includes a hex dump of the raw packet bytes with field boundaries highlighted.
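
For intuition, a bare-bones hex dump (offset, hex bytes, printable ASCII) takes only a few lines; this sketch is illustrative and omits the pane's field-boundary highlighting:

```js
// Illustrative hex dump: 16 bytes per row, offset + hex + printable ASCII.
function hexDump(bytes) {
  const rows = [];
  for (let off = 0; off < bytes.length; off += 16) {
    const chunk = [...bytes.slice(off, off + 16)];
    const hex = chunk.map(b => b.toString(16).padStart(2, '0')).join(' ');
    const ascii = chunk.map(b => (b >= 32 && b < 127) ? String.fromCharCode(b) : '.').join('');
    rows.push(off.toString(16).padStart(4, '0') + '  ' + hex.padEnd(47) + '  ' + ascii);
  }
  return rows.join('\n');
}
```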
## Observation sorting

When viewing a grouped packet's observations, they're sorted by SNR (best signal first). This helps you see which observer had the clearest reception.

## Display options

- **Hex hashes** — toggle to show packet hashes in hex format
- **Panel resize** — drag the detail pane border to resize it
- **Keyboard shortcuts** — press `Esc` to close the detail pane

## Tips

- Grouped view is best for understanding what's happening on the mesh
- Ungrouped view is best for debugging signal paths and comparing observers
- The time window filter is your best friend for managing large datasets
- Packet hashes in the URL are deep-linkable — share a link to a specific packet
+919
-13
@@ -86,6 +86,8 @@
<button class="tab-btn" data-tab="nodes">Nodes</button>
<button class="tab-btn" data-tab="distance">Distance</button>
<button class="tab-btn" data-tab="neighbor-graph">Neighbor Graph</button>
<button class="tab-btn" data-tab="rf-health">RF Health</button>
<button class="tab-btn" data-tab="prefix-tool">Prefix Tool</button>
</div>
</div>
<div id="analyticsContent" class="analytics-content">
@@ -173,6 +175,8 @@
case 'nodes': await renderNodesTab(el); break;
case 'distance': await renderDistanceTab(el); break;
case 'neighbor-graph': await renderNeighborGraphTab(el); break;
case 'rf-health': await renderRFHealthTab(el); break;
case 'prefix-tool': await renderPrefixTool(el); break;
}
// Auto-apply column resizing to all analytics tables
requestAnimationFrame(() => {
@@ -985,11 +989,13 @@
<a href="#/analytics?tab=collisions&section=hashMatrixSection" style="color:var(--accent)">🔢 Hash Matrix</a>
<span style="color:var(--border)">|</span>
<a href="#/analytics?tab=collisions&section=collisionRiskSection" style="color:var(--accent)">💥 Collision Risk</a>
<span style="color:var(--border)">|</span>
<a href="#/analytics?tab=prefix-tool" style="color:var(--accent)">🔎 Check a prefix →</a>
</nav>

<div class="analytics-card" id="inconsistentHashSection">
<div style="display:flex;justify-content:space-between;align-items:center"><h3 style="margin:0">⚠️ Inconsistent Hash Sizes</h3><a href="#/analytics?tab=collisions" style="font-size:11px;color:var(--text-muted)">↑ top</a></div>
<p class="text-muted" style="margin:4px 0 8px;font-size:0.8em">Nodes sending adverts with varying hash sizes. Caused by a <a href="https://github.com/meshcore-dev/MeshCore/commit/fcfdc5f" target="_blank" style="color:var(--accent)">bug</a> where automatic adverts ignored the configured multibyte path setting. Fixed in <a href="https://github.com/meshcore-dev/MeshCore/releases/tag/repeater-v1.14.1" target="_blank" style="color:var(--accent)">repeater v1.14.1</a>.</p>
<p class="text-muted" style="margin:4px 0 8px;font-size:0.8em">Repeaters and room servers sending adverts with varying hash sizes in the last 7 days. Originally caused by a <a href="https://github.com/meshcore-dev/MeshCore/commit/fcfdc5f" target="_blank" style="color:var(--accent)">firmware bug</a> where automatic adverts ignored the configured multibyte path setting, fixed in <a href="https://github.com/meshcore-dev/MeshCore/releases/tag/repeater-v1.14.1" target="_blank" style="color:var(--accent)">repeater v1.14.1</a>. Companion nodes are excluded.</p>
<div id="inconsistentHashList"><div class="text-muted" style="padding:8px"><span class="spinner"></span> Loading…</div></div>
</div>

@@ -1398,12 +1404,8 @@
el.innerHTML = '<div class="text-center text-muted" style="padding:40px">Analyzing route patterns…</div>';
try {
const rq = RegionFilter.regionQueryString();
const [d2, d3, d4, d5] = await Promise.all([
api('/analytics/subpaths?minLen=2&maxLen=2&limit=50' + rq, { ttl: CLIENT_TTL.analyticsRF }),
api('/analytics/subpaths?minLen=3&maxLen=3&limit=30' + rq, { ttl: CLIENT_TTL.analyticsRF }),
api('/analytics/subpaths?minLen=4&maxLen=4&limit=20' + rq, { ttl: CLIENT_TTL.analyticsRF }),
api('/analytics/subpaths?minLen=5&maxLen=8&limit=15' + rq, { ttl: CLIENT_TTL.analyticsRF })
]);
const bulk = await api('/analytics/subpaths-bulk?groups=2-2:50,3-3:30,4-4:20,5-8:15' + rq, { ttl: CLIENT_TTL.analyticsRF });
const [d2, d3, d4, d5] = bulk.results;

function renderTable(data, title) {
if (!data.subpaths.length) return `<h4>${title}</h4><div class="text-muted">No data</div>`;
@@ -1602,10 +1604,9 @@
el.innerHTML = '<div style="padding:40px;text-align:center;color:var(--text-muted)">Loading node analytics…</div>';
try {
const rq = RegionFilter.regionQueryString();
const [nodesResp, bulkHealth, netStatus] = await Promise.all([
api('/nodes?limit=200&sortBy=lastSeen' + rq, { ttl: CLIENT_TTL.nodeList }),
api('/nodes/bulk-health?limit=50' + rq, { ttl: CLIENT_TTL.analyticsRF }),
api('/nodes/network-status' + (rq ? '?' + rq.slice(1) : ''), { ttl: CLIENT_TTL.analyticsRF })
const [nodesResp, bulkHealth] = await Promise.all([
api('/nodes?limit=10000&sortBy=lastSeen' + rq, { ttl: CLIENT_TTL.nodeList }),
api('/nodes/bulk-health?limit=50' + rq, { ttl: CLIENT_TTL.analyticsRF })
]);
const nodes = nodesResp.nodes || nodesResp;
const myNodes = JSON.parse(localStorage.getItem('meshcore-my-nodes') || '[]');
@@ -1622,8 +1623,22 @@
const byObservers = [...enriched].sort((a, b) => (b.health.observers?.length || 0) - (a.health.observers?.length || 0));
const byRecent = [...enriched].filter(n => n.health.stats.lastHeard).sort((a, b) => new Date(b.health.stats.lastHeard) - new Date(a.health.stats.lastHeard));

// Use server-computed status across ALL nodes
const { active, degraded, silent, total: totalNodes, roleCounts } = netStatus;
// Compute network status client-side from loaded nodes using shared getHealthThresholds()
const now = Date.now();
let active = 0, degraded = 0, silent = 0;
nodes.forEach(function(n) {
const role = n.role || 'unknown';
const th = getHealthThresholds(role);
const lastMs = n.last_heard ? new Date(n.last_heard).getTime()
: n.last_seen ? new Date(n.last_seen).getTime()
: 0;
const age = lastMs ? (now - lastMs) : Infinity;
if (age < th.degradedMs) active++;
else if (age < th.silentMs) degraded++;
else silent++;
});
const totalNodes = nodesResp.total || nodes.length;
const roleCounts = nodesResp.counts || {};

function nodeLink(n) {
return `<a href="#/nodes/${encodeURIComponent(n.public_key)}/analytics" class="analytics-link">${esc(n.name || n.public_key.slice(0, 12))}</a>`;
@@ -2293,5 +2308,896 @@ function destroy() { _analyticsData = {}; _channelData = null; if (_ngState && _
_ngState.animId = requestAnimationFrame(tick);
}

// --- Prefix Tool ---
async function renderPrefixTool(el) {
el.innerHTML = '<div style="padding:40px;text-align:center;color:var(--text-muted)">Loading prefix data…</div>';

const rq = RegionFilter.regionQueryString();
const regionLabel = rq ? (new URLSearchParams(rq.slice(1)).get('region') || '') : '';

let nodesResp;
try {
nodesResp = await api('/nodes?limit=10000&sortBy=lastSeen' + rq, { ttl: CLIENT_TTL.nodeList });
} catch (e) {
el.innerHTML = `<div class="text-muted" role="alert" style="padding:40px">Failed to load: ${esc(e.message)}</div>`;
return;
}

// Deduplicate by public_key, require at least 6 hex chars to build all 3 tiers
const nodeMap = new Map();
(nodesResp.nodes || nodesResp).forEach(n => {
if (n.public_key && n.public_key.length >= 6 && !nodeMap.has(n.public_key)) {
nodeMap.set(n.public_key, n);
}
});
const nodes = [...nodeMap.values()];

if (nodes.length === 0) {
el.innerHTML = `<div class="analytics-card"><p class="text-muted">No nodes in the network yet. Any prefix is available!</p></div>`;
return;
}

// Build 3-tier prefix indexes: prefix (uppercase hex) -> [nodes]
const idx = { 1: new Map(), 2: new Map(), 3: new Map() };
nodes.forEach(n => {
const pk = n.public_key.toUpperCase();
[1, 2, 3].forEach(b => {
const p = pk.slice(0, b * 2);
if (!idx[b].has(p)) idx[b].set(p, []);
idx[b].get(p).push(n);
});
});

// Network overview stats
const spaceSizes = { 1: 256, 2: 65536, 3: 16777216 };
const stats = {};
[1, 2, 3].forEach(b => {
stats[b] = {
usedPrefixes: idx[b].size,
collidingPrefixes: [...idx[b].values()].filter(arr => arr.length > 1).length,
};
});

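// Added note (rough collision intuition, birthday problem): hashing n node
// keys into S possible prefixes yields ≈ n²/(2·S) expected colliding pairs;
// e.g. 500 nodes in the 2-byte space (S = 65536) gives ≈ 1.9 expected pairs,
// which is why the size thresholds below step up well before the space fills.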
// Recommendation by network size
const totalNodes = nodes.length;
let rec, recDetail;
if (totalNodes < 20) {
rec = '1-byte'; recDetail = `With only ${totalNodes} nodes, 1-byte prefixes have low collision risk.`;
} else if (totalNodes < 500) {
rec = '2-byte'; recDetail = `With ${totalNodes} nodes, 2-byte prefixes are recommended to avoid collisions.`;
} else {
rec = '2-byte'; recDetail = `With ${totalNodes} nodes, 2-byte prefixes are strongly recommended.`;
}

// URL params for pre-fill / auto-run
const hashParams = new URLSearchParams((location.hash.split('?')[1] || ''));
const initPrefix = hashParams.get('prefix') || '';
const initGenerate = hashParams.get('generate') || '';

const regionNote = regionLabel
? `<p class="text-muted" style="font-size:0.85em;margin:4px 0 0">Showing data for region: <strong>${esc(regionLabel)}</strong>. <a href="#/analytics?tab=prefix-tool" style="color:var(--accent)">Check all nodes →</a></p>`
: '';

el.innerHTML = `
<div class="analytics-card" id="ptOverview">
<div style="display:flex;align-items:center;gap:8px;cursor:pointer;user-select:none" id="ptOverviewToggle">
<span id="ptOverviewChevron" style="font-size:0.75em;color:var(--text-muted);transition:transform 0.2s">▶</span>
<h3 style="margin:0">Network Overview</h3>
</div>
<div id="ptOverviewBody" style="display:none">
${regionNote}
<div style="display:flex;gap:12px;flex-wrap:wrap;margin:12px 0 16px">
<div class="analytics-stat-card" style="flex:1;min-width:110px">
<div class="analytics-stat-label">Total nodes</div>
<div class="analytics-stat-value">${totalNodes.toLocaleString()}</div>
</div>
${[1, 2, 3].map(b => `
<div class="analytics-stat-card" style="flex:1;min-width:150px;border-color:${stats[b].collidingPrefixes > 0 ? 'var(--status-red)' : 'var(--border)'}">
<div class="analytics-stat-label">${b}-byte prefixes</div>
<div class="analytics-stat-value" style="font-size:1em">
${stats[b].usedPrefixes.toLocaleString()}
<span class="text-muted" style="font-size:0.7em"> / ${spaceSizes[b].toLocaleString()}</span>
</div>
<div style="font-size:0.82em;margin-top:4px;color:${stats[b].collidingPrefixes > 0 ? 'var(--status-red)' : 'var(--status-green)'}">
${stats[b].collidingPrefixes === 0
? '✅ No collisions'
: `⚠️ ${stats[b].collidingPrefixes} prefix${stats[b].collidingPrefixes !== 1 ? 'es' : ''} collide`}
</div>
</div>`).join('')}
</div>
<div style="background:var(--bg-secondary,var(--bg));border:1px solid var(--border);border-radius:6px;padding:10px 14px">
<strong>Recommendation: ${rec} prefixes</strong> — ${recDetail}
<span class="text-muted" style="font-size:0.8em;display:block;margin-top:4px">Hash size is configured per-node in firmware. Changing requires reflashing.</span>
</div>
</div>
</div>

<div class="analytics-card" id="ptChecker">
<h3 style="margin-top:0">Check a Prefix</h3>
<p class="text-muted" style="margin-top:0;font-size:0.9em">Enter a 1-byte (2 hex chars), 2-byte (4 hex chars), or 3-byte (6 hex chars) prefix — or paste a full public key.</p>
<div style="display:flex;gap:8px;align-items:flex-start;flex-wrap:wrap">
<input id="ptPrefixInput" type="text" placeholder="e.g. A3F1" maxlength="64"
style="font-family:var(--mono);font-size:1em;padding:6px 10px;background:var(--bg);color:var(--text);border:1px solid var(--border);border-radius:4px;min-width:180px;flex:1"
value="${esc(initPrefix)}">
<button id="ptCheckBtn" style="padding:6px 16px;background:var(--accent);color:#fff;border:none;border-radius:4px;cursor:pointer;font-size:0.95em">Check</button>
</div>
<div id="ptCheckerResults" style="margin-top:14px"></div>
</div>

<div class="analytics-card" id="ptGenerator">
<h3 style="margin-top:0">Generate Available Prefix</h3>
<p class="text-muted" style="margin-top:0;font-size:0.9em">Find a prefix with zero current collisions.</p>
<div style="display:flex;gap:16px;align-items:center;flex-wrap:wrap;margin-bottom:12px">
<label style="display:flex;align-items:center;gap:6px;cursor:pointer">
<input type="radio" name="ptGenSize" value="1" ${initGenerate === '1' ? 'checked' : ''}> 1-byte
</label>
<label style="display:flex;align-items:center;gap:6px;cursor:pointer">
<input type="radio" name="ptGenSize" value="2" ${initGenerate !== '1' && initGenerate !== '3' ? 'checked' : ''}> 2-byte
<span class="text-muted" style="font-size:0.8em">(recommended)</span>
</label>
<label style="display:flex;align-items:center;gap:6px;cursor:pointer">
<input type="radio" name="ptGenSize" value="3" ${initGenerate === '3' ? 'checked' : ''}> 3-byte
</label>
<button id="ptGenBtn" style="padding:6px 16px;background:var(--accent);color:#fff;border:none;border-radius:4px;cursor:pointer;font-size:0.95em">Generate</button>
</div>
<div id="ptGenResult"></div>
<div style="margin-top:14px;padding:10px 14px;border:1px solid var(--accent);border-radius:6px;background:var(--bg-secondary,var(--bg));font-size:0.88em">
📖 <strong>New to multi-byte prefixes?</strong>
<a href="https://github.com/meshcore-dev/MeshCore/blob/main/docs/faq.md#39-q-what-is-multi-byte-support--what-do-1-byte-2-byte-3-byte-adverts-and-messages-mean"
target="_blank" rel="noopener noreferrer" style="color:var(--accent);margin-left:4px">
Read the MeshCore FAQ on multi-byte support →
</a>
</div>
</div>`;

// --- Helpers ---
function nodeEntry(n) {
const name = esc(n.name || n.public_key.slice(0, 12));
const role = n.role ? `<span class="text-muted" style="font-size:0.82em">${esc(n.role)}</span>` : '';
const when = n.last_seen ? ` <span class="text-muted" style="font-size:0.8em">${new Date(n.last_seen).toLocaleDateString()}</span>` : '';
return `<div style="padding:3px 0"><a href="#/nodes/${encodeURIComponent(n.public_key)}" class="analytics-link">${name}</a> ${role}${when}</div>`;
}

function severityBadge(count) {
if (count === 0) return '<span style="color:var(--status-green)">✅ Unique</span>';
if (count <= 2) return `<span style="color:var(--status-yellow)">⚠️ ${count} collision${count !== 1 ? 's' : ''}</span>`;
return `<span style="color:var(--status-red)">🔴 ${count} collisions</span>`;
}

// --- Checker ---
function doCheck(raw) {
const resultsEl = document.getElementById('ptCheckerResults');
if (!resultsEl) return;
const input = raw.trim().toUpperCase();
if (!input) { resultsEl.innerHTML = ''; return; }

if (!/^[0-9A-F]+$/.test(input)) {
resultsEl.innerHTML = '<p style="color:var(--status-red);margin:0">Invalid input — hex characters only (0-9, A-F).</p>';
return;
}
if (input.length % 2 !== 0 || (input.length !== 2 && input.length !== 4 && input.length !== 6 && input.length < 8)) {
resultsEl.innerHTML = '<p style="color:var(--status-red);margin:0">Prefix must be 2, 4, or 6 hex characters. For a full public key, use 64 characters.</p>';
return;
}

const isFullKey = input.length >= 8;
const tiers = isFullKey
? [{ b: 1, prefix: input.slice(0, 2) }, { b: 2, prefix: input.slice(0, 4) }, { b: 3, prefix: input.slice(0, 6) }]
: [{ b: input.length / 2, prefix: input }];

let html = '';
if (isFullKey) {
const inNetwork = nodes.some(n => n.public_key.toUpperCase() === input);
html += `<p class="text-muted" style="font-size:0.85em;margin:0 0 10px">Derived prefixes: <code class="mono">${input.slice(0,2)}</code> / <code class="mono">${input.slice(0,4)}</code> / <code class="mono">${input.slice(0,6)}</code>${!inNetwork ? ' — <em>this node is not yet in the network</em>' : ''}</p>`;
}

tiers.forEach(({ b, prefix }) => {
const matches = idx[b].get(prefix) || [];
const colliders = isFullKey ? matches.filter(n => n.public_key.toUpperCase() !== input) : matches;
const count = colliders.length;
html += `
<div style="margin-bottom:10px;padding:10px 14px;border:1px solid var(--border);border-radius:6px;background:var(--bg-secondary,var(--bg))">
<div style="display:flex;align-items:center;gap:8px;margin-bottom:6px">
<code class="mono" style="font-weight:700">${prefix}</code>
<span class="text-muted" style="font-size:0.82em">${b}-byte</span>
${severityBadge(count)}
</div>
${count === 0
? '<div class="text-muted" style="font-size:0.85em">No existing nodes use this prefix.</div>'
: `<div style="font-size:0.85em;max-height:140px;overflow-y:auto">${colliders.map(nodeEntry).join('')}</div>`}
</div>`;
});

resultsEl.innerHTML = html;
}

// --- Generator ---
function doGenerate() {
const genResultEl = document.getElementById('ptGenResult');
if (!genResultEl) return;
const sizeInput = el.querySelector('input[name="ptGenSize"]:checked');
const b = sizeInput ? parseInt(sizeInput.value) : 2;
const hexLen = b * 2;
const totalSpace = spaceSizes[b];
const available = totalSpace - idx[b].size;

if (available === 0) {
const next = b < 3 ? (b + 1) + '-byte' : 'a different size';
genResultEl.innerHTML = `<p style="color:var(--status-red);margin:0">No collision-free ${b}-byte prefixes available. Try ${next}.</p>`;
return;
}

let prefix;
if (b === 1) {
// Enumerate all 256 options
const free = [];
for (let i = 0; i < totalSpace; i++) {
const p = i.toString(16).toUpperCase().padStart(hexLen, '0');
if (!idx[b].has(p)) free.push(p);
}
prefix = free[Math.floor(Math.random() * free.length)];
} else {
// Random sampling — with 2K used / 65K space, hit rate >96%
let attempts = 0;
do {
prefix = Math.floor(Math.random() * totalSpace).toString(16).toUpperCase().padStart(hexLen, '0');
} while (idx[b].has(prefix) && ++attempts < 500);
// Fallback to enumeration if sampling kept hitting used prefixes
if (idx[b].has(prefix)) {
for (let i = 0; i < totalSpace; i++) {
const p = i.toString(16).toUpperCase().padStart(hexLen, '0');
if (!idx[b].has(p)) { prefix = p; break; }
}
}
}

genResultEl.innerHTML = `
<div style="padding:12px 16px;border:1px solid var(--status-green);border-radius:6px;background:var(--bg-secondary,var(--bg))">
<div style="display:flex;align-items:center;gap:10px;flex-wrap:wrap">
<code class="mono" style="font-size:1.3em;font-weight:700;color:var(--status-green)">${prefix}</code>
<span style="color:var(--status-green)">✅ No existing nodes use this prefix</span>
</div>
<div class="text-muted" style="font-size:0.85em;margin-top:6px">${available.toLocaleString()} of ${totalSpace.toLocaleString()} ${b}-byte prefixes are available.</div>
<div style="margin-top:10px;display:flex;gap:8px;flex-wrap:wrap;align-items:center">
<button id="ptRegenBtn" style="padding:5px 14px;background:var(--bg);color:var(--text);border:1px solid var(--border);border-radius:4px;cursor:pointer;font-size:0.9em">Try another</button>
<a href="https://agessaman.github.io/meshcore-web-keygen/?prefix=${prefix}" target="_blank" rel="noopener noreferrer"
style="padding:5px 14px;background:var(--bg);color:var(--accent);border:1px solid var(--border);border-radius:4px;text-decoration:none;font-size:0.9em">
Generate key with this prefix →
</a>
</div>
</div>`;
document.getElementById('ptRegenBtn').addEventListener('click', doGenerate);
}

// --- Wire up ---
const checkBtn = document.getElementById('ptCheckBtn');
const prefixInput = document.getElementById('ptPrefixInput');
const genBtn = document.getElementById('ptGenBtn');

checkBtn.addEventListener('click', () => doCheck(prefixInput.value));
prefixInput.addEventListener('keydown', e => { if (e.key === 'Enter') doCheck(prefixInput.value); });
genBtn.addEventListener('click', doGenerate);

// Network Overview toggle
document.getElementById('ptOverviewToggle').addEventListener('click', () => {
const body = document.getElementById('ptOverviewBody');
const chevron = document.getElementById('ptOverviewChevron');
const open = body.style.display === 'none';
body.style.display = open ? '' : 'none';
chevron.style.transform = open ? 'rotate(90deg)' : '';
});

// Auto-run from URL params
if (initPrefix) {
doCheck(initPrefix);
setTimeout(() => { document.getElementById('ptChecker')?.scrollIntoView({ behavior: 'smooth', block: 'start' }); }, 150);
} else if (initGenerate) {
doGenerate();
setTimeout(() => { document.getElementById('ptGenerator')?.scrollIntoView({ behavior: 'smooth', block: 'start' }); }, 150);
}
}

// ===================== RF HEALTH =====================

let _rfHealthState = { range: '24h', selectedObserver: null, customFrom: '', customTo: '' };

function rfHealthTimeRangeToParams(range, customFrom, customTo) {
const now = new Date();
let since, until;
if (range === 'custom' && customFrom) {
since = new Date(customFrom).toISOString();
until = customTo ? new Date(customTo).toISOString() : now.toISOString();
} else {
const durations = { '1h': 1, '3h': 3, '6h': 6, '12h': 12, '24h': 24, '3d': 72, '7d': 168, '30d': 720 };
const hours = durations[range] || 24;
since = new Date(now.getTime() - hours * 3600000).toISOString();
until = now.toISOString();
}
return { since, until };
}

function rfHealthUpdateHash() {
const params = new URLSearchParams();
params.set('tab', 'rf-health');
if (_rfHealthState.range !== '24h') params.set('range', _rfHealthState.range);
if (_rfHealthState.selectedObserver) params.set('observer', _rfHealthState.selectedObserver);
if (_rfHealthState.range === 'custom') {
if (_rfHealthState.customFrom) params.set('from', _rfHealthState.customFrom);
if (_rfHealthState.customTo) params.set('to', _rfHealthState.customTo);
}
history.replaceState(null, '', '#/analytics?' + params.toString());
}

async function renderRFHealthTab(el) {
// Restore state from URL
const hashParams = new URLSearchParams((location.hash.split('?')[1] || ''));
if (hashParams.get('range')) _rfHealthState.range = hashParams.get('range');
if (hashParams.get('observer')) _rfHealthState.selectedObserver = hashParams.get('observer');
if (hashParams.get('from')) { _rfHealthState.customFrom = hashParams.get('from'); _rfHealthState.range = 'custom'; }
if (hashParams.get('to')) { _rfHealthState.customTo = hashParams.get('to'); _rfHealthState.range = 'custom'; }

const ranges = ['1h','3h','6h','12h','24h','3d','7d','30d'];
const rangeButtons = ranges.map(r =>
`<button class="rf-range-btn${_rfHealthState.range === r ? ' active' : ''}" data-range="${r}">${r}</button>`
).join('');

el.innerHTML = `
<div class="rf-health-container">
<div class="rf-time-selector">
${rangeButtons}
<button class="rf-range-btn${_rfHealthState.range === 'custom' ? ' active' : ''}" data-range="custom">Custom</button>
<span class="rf-custom-inputs" style="display:${_rfHealthState.range === 'custom' ? 'inline' : 'none'}">
<input type="datetime-local" class="rf-datetime" id="rfFrom" value="${_rfHealthState.customFrom}">
<span>→</span>
<input type="datetime-local" class="rf-datetime" id="rfTo" value="${_rfHealthState.customTo}">
<button class="rf-range-btn" id="rfCustomApply">Apply</button>
</span>
</div>
<div class="rf-health-split">
<div id="rfHealthGrid" class="rf-health-grid">
<div class="text-muted" style="padding:20px">Loading RF metrics…</div>
</div>
<div id="rfHealthDetail" class="rf-health-detail rf-panel-empty">
<span>Select an observer to view details</span>
</div>
</div>
</div>`;

// Range button handlers
el.querySelectorAll('.rf-range-btn[data-range]').forEach(btn => {
btn.addEventListener('click', () => {
const range = btn.dataset.range;
_rfHealthState.range = range;
el.querySelectorAll('.rf-range-btn').forEach(b => b.classList.remove('active'));
btn.classList.add('active');
const customInputs = el.querySelector('.rf-custom-inputs');
if (customInputs) customInputs.style.display = range === 'custom' ? 'inline' : 'none';
if (range !== 'custom') {
rfHealthUpdateHash();
loadRFHealthData(el);
}
});
});

const applyBtn = document.getElementById('rfCustomApply');
if (applyBtn) {
applyBtn.addEventListener('click', () => {
_rfHealthState.customFrom = document.getElementById('rfFrom').value;
_rfHealthState.customTo = document.getElementById('rfTo').value;
rfHealthUpdateHash();
loadRFHealthData(el);
});
}

await loadRFHealthData(el);
}

async function loadRFHealthData(el) {
const grid = document.getElementById('rfHealthGrid');
const detail = document.getElementById('rfHealthDetail');

try {
// Compute window string for summary endpoint
const windowMap = { '1h':'1h', '3h':'3h', '6h':'6h', '12h':'12h', '24h':'24h', '3d':'3d', '7d':'7d', '30d':'30d' };
const window = windowMap[_rfHealthState.range] || '24h';
const summaryData = await api('/observers/metrics/summary?window=' + window + (RegionFilter.regionQueryString() || ''));
const observers = summaryData.observers || [];

// Filter to observers with sufficient sparkline data (≥2 non-null noise_floor values)
const filteredObservers = observers.filter(obs => {
const nfValues = (obs.sparkline || []).filter(v => v != null);
return nfValues.length >= 2;
});

if (!filteredObservers.length) {
grid.innerHTML = '<div class="text-muted" style="padding:20px">No RF metrics data available yet. Metrics are collected from observer status messages every ~5 minutes.</div>';
return;
}

// Render small multiples grid
grid.innerHTML = filteredObservers.map(obs => {
const nf = obs.current_noise_floor != null ? obs.current_noise_floor.toFixed(1) : '—';
const avgNf = obs.avg_noise_floor_24h != null ? obs.avg_noise_floor_24h.toFixed(1) : '—';
const maxNf = obs.max_noise_floor_24h != null ? obs.max_noise_floor_24h.toFixed(1) : '—';
const batt = obs.battery_mv != null ? (obs.battery_mv / 1000).toFixed(2) + 'V' : '';
const name = obs.observer_name || obs.observer_id.substring(0, 8);
const isSelected = _rfHealthState.selectedObserver === obs.observer_id;

// NF status coloring
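// Added note: these cutoffs are heuristics — a noise floor at or above
// -85 dBm leaves little SNR headroom for LoRa reception (critical), while
// -100 to -85 dBm is elevated but usually workable (warning).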
let nfClass = '';
if (obs.current_noise_floor != null) {
if (obs.current_noise_floor >= -85) nfClass = 'rf-nf-critical';
else if (obs.current_noise_floor >= -100) nfClass = 'rf-nf-warning';
}

return `<div class="rf-cell${isSelected ? ' rf-cell-selected' : ''}" data-observer="${obs.observer_id}" tabindex="0" role="button" aria-label="Observer ${name}, noise floor ${nf} dBm">
<div class="rf-cell-header">
<span class="rf-cell-name">${esc(name)}</span>
<span class="rf-cell-nf ${nfClass}">${nf} dBm</span>
${batt ? `<span class="rf-cell-batt">${batt}</span>` : ''}
</div>
<div class="rf-cell-sparkline" id="rf-spark-${obs.observer_id}"></div>
<div class="rf-cell-stats">
<span>avg: ${avgNf}</span>
<span>max: ${maxNf}</span>
<span>${obs.sample_count} samples</span>
</div>
</div>`;
}).join('');

// Click handler for cells
grid.querySelectorAll('.rf-cell').forEach(cell => {
cell.addEventListener('click', () => {
const obsId = cell.dataset.observer;
grid.querySelectorAll('.rf-cell').forEach(c => c.classList.remove('rf-cell-selected'));
cell.classList.add('rf-cell-selected');
_rfHealthState.selectedObserver = obsId;
rfHealthUpdateHash();
loadRFHealthDetail(obsId, detail);
});
cell.addEventListener('keydown', e => {
if (e.key === 'Enter' || e.key === ' ') { e.preventDefault(); cell.click(); }
});
});

// Render sparklines from summary data (no extra API calls)
for (const obs of filteredObservers) {
const nfValues = (obs.sparkline || []).filter(v => v != null);
const container = document.getElementById(`rf-spark-${obs.observer_id}`);
if (container && nfValues.length > 1) {
container.innerHTML = rfNFSparkline(nfValues, 140, 24);
}
}

// Auto-expand selected observer from URL
if (_rfHealthState.selectedObserver) {
const selectedCell = grid.querySelector(`[data-observer="${_rfHealthState.selectedObserver}"]`);
if (selectedCell) {
selectedCell.classList.add('rf-cell-selected');
loadRFHealthDetail(_rfHealthState.selectedObserver, detail);
}
}
} catch (e) {
grid.innerHTML = `<div class="text-muted" style="padding:20px">Failed to load RF health data: ${esc(e.message)}</div>`;
}
}

async function loadRFSparkline(observerId) {
const { since, until } = rfHealthTimeRangeToParams(_rfHealthState.range, _rfHealthState.customFrom, _rfHealthState.customTo);
try {
const data = await api(`/observers/${observerId}/metrics?since=${encodeURIComponent(since)}&until=${encodeURIComponent(until)}`);
const metrics = data.metrics || [];
const nfValues = metrics.map(m => m.noise_floor).filter(v => v != null);
const container = document.getElementById(`rf-spark-${observerId}`);
if (container && nfValues.length > 1) {
container.innerHTML = rfNFSparkline(nfValues, 140, 24);
} else if (container) {
container.innerHTML = '<span class="text-muted" style="font-size:10px">insufficient data</span>';
}
} catch (e) {
// Non-fatal — sparkline just won't render
}
}

function rfNFSparkline(data, w, h) {
if (!data.length) return '';
// For noise floor, invert: more negative = better = lower on chart
const min = Math.min(...data);
const max = Math.max(...data);
const range = max - min || 1;
const pts = data.map((v, i) => {
const x = (i / Math.max(data.length - 1, 1)) * w;
// Higher dBm (worse) = higher on chart
const y = h - 2 - ((v - min) / range) * (h - 4);
return `${x.toFixed(1)},${y.toFixed(1)}`;
}).join(' ');

// Reference lines
let refs = '';
if (min <= -100 && max >= -100) {
const y100 = h - 2 - ((-100 - min) / range) * (h - 4);
refs += `<line x1="0" y1="${y100.toFixed(1)}" x2="${w}" y2="${y100.toFixed(1)}" stroke="var(--text-muted)" stroke-width="0.5" stroke-dasharray="2"/>`;
}

return `<svg viewBox="0 0 ${w} ${h}" style="width:${w}px;height:${h}px" role="img" aria-label="Noise floor sparkline"><title>Noise floor trend</title>${refs}<polyline points="${pts}" fill="none" stroke="var(--accent)" stroke-width="1.5"/></svg>`;
}

async function loadRFHealthDetail(observerId, container) {
container.classList.remove('rf-panel-empty');
container.innerHTML = '<div class="text-muted" style="padding:10px">Loading detail…</div>';

const { since, until } = rfHealthTimeRangeToParams(_rfHealthState.range, _rfHealthState.customFrom, _rfHealthState.customTo);
// Choose resolution based on time range
let resolution = '5m';
const rangeMap = { '7d': '1h', '30d': '1h' };
if (rangeMap[_rfHealthState.range]) resolution = rangeMap[_rfHealthState.range];

try {
const data = await api(`/observers/${observerId}/metrics?since=${encodeURIComponent(since)}&until=${encodeURIComponent(until)}&resolution=${resolution}`);
const metrics = data.metrics || [];
const reboots = (data.reboots || []).map(r => new Date(r).getTime());
const name = data.observer_name || observerId.substring(0, 8);

if (!metrics.length) {
container.innerHTML = `<div class="text-muted" style="padding:10px">No metrics data for ${esc(name)} in selected time range.</div>`;
return;
}

// Extract data series
const nfData = metrics.map(m => ({ t: m.timestamp, v: m.noise_floor })).filter(d => d.v != null);
const txData = metrics.map(m => ({ t: m.timestamp, v: m.tx_airtime_pct })).filter(d => d.v != null);
const rxData = metrics.map(m => ({ t: m.timestamp, v: m.rx_airtime_pct })).filter(d => d.v != null);
const errData = metrics.map(m => ({ t: m.timestamp, v: m.recv_error_rate })).filter(d => d.v != null);
const battData = metrics.map(m => ({ t: m.timestamp, v: m.battery_mv })).filter(d => d.v != null && d.v > 0);

const hasAirtime = txData.length > 1 || rxData.length > 1;
const hasErrors = errData.length > 1;
const hasBattery = battData.length > 1;

// Current values
const latest = metrics[metrics.length - 1];
const nfValues = metrics.map(m => m.noise_floor).filter(v => v != null);
const avgNf = nfValues.length ? (nfValues.reduce((a,b) => a+b, 0) / nfValues.length).toFixed(1) : '—';
const minNf = nfValues.length ? Math.min(...nfValues).toFixed(1) : '—';
const maxNf = nfValues.length ? Math.max(...nfValues).toFixed(1) : '—';
const curNf = latest.noise_floor != null ? latest.noise_floor.toFixed(1) : '—';
const curBatt = latest.battery_mv != null && latest.battery_mv > 0 ? (latest.battery_mv / 1000).toFixed(2) + 'V' : '—';
const curTx = latest.tx_airtime_pct != null ? latest.tx_airtime_pct.toFixed(1) + '%' : '—';
const curRx = latest.rx_airtime_pct != null ? latest.rx_airtime_pct.toFixed(1) + '%' : '—';
const curErr = latest.recv_error_rate != null ? latest.recv_error_rate.toFixed(2) + '%' : '—';

container.innerHTML = `
<div class="rf-detail-header">
<h3>${esc(name)}</h3>
<button class="rf-detail-close" aria-label="Close detail" title="Close">✕</button>
</div>
<div class="rf-detail-charts">
<div class="rf-detail-chart" id="rfDetailNFChart"></div>
${hasAirtime ? '<div class="rf-detail-chart" id="rfDetailAirtimeChart"></div>' : ''}
${hasErrors ? '<div class="rf-detail-chart" id="rfDetailErrorChart"></div>' : ''}
${hasBattery ? '<div class="rf-detail-chart" id="rfDetailBatteryChart"></div>' : ''}
</div>
<div class="rf-detail-summary">
NF: ${curNf} dBm | avg: ${avgNf} | min: ${minNf} | max: ${maxNf} | TX: ${curTx} | RX: ${curRx} | Err: ${curErr} | Batt: ${curBatt}${reboots.length ? ' | ' + reboots.length + ' reboots' : ''}
</div>`;

// Close button
container.querySelector('.rf-detail-close').addEventListener('click', () => {
container.classList.add('rf-panel-empty');
container.innerHTML = '<span>Select an observer to view details</span>';
_rfHealthState.selectedObserver = null;
rfHealthUpdateHash();
document.querySelectorAll('.rf-cell').forEach(c => c.classList.remove('rf-cell-selected'));
});

// Compute shared time range across all charts
const allTimestamps = metrics.map(m => new Date(m.timestamp).getTime());
const minT = Math.min(...allTimestamps);
const maxT = Math.max(...allTimestamps);

// Render noise floor chart
const nfEl = document.getElementById('rfDetailNFChart');
if (nfEl && nfData.length > 1) {
nfEl.innerHTML = rfNFLineChart(nfData, nfEl.clientWidth || 700, 180, reboots, minT, maxT);
} else if (nfEl) {
nfEl.innerHTML = '<span class="text-muted">Not enough noise floor data</span>';
}

// Render airtime chart
if (hasAirtime) {
const atEl = document.getElementById('rfDetailAirtimeChart');
if (atEl) {
atEl.innerHTML = rfAirtimeChart(txData, rxData, atEl.clientWidth || 700, 150, reboots, minT, maxT);
}
}

// Render error rate chart
if (hasErrors) {
const errEl = document.getElementById('rfDetailErrorChart');
if (errEl) {
errEl.innerHTML = rfErrorRateChart(errData, errEl.clientWidth || 700, 120, reboots, minT, maxT);
}
}

// Render battery chart
if (hasBattery) {
const battEl = document.getElementById('rfDetailBatteryChart');
if (battEl) {
battEl.innerHTML = rfBatteryChart(battData, battEl.clientWidth || 700, 120, reboots, minT, maxT);
}
}
} catch (e) {
container.innerHTML = `<div class="text-muted" style="padding:10px">Failed to load detail: ${esc(e.message)}</div>`;
}
}

// Shared helper: render reboot markers as vertical hairlines
function rfRebootMarkers(reboots, sx, pad, h, w) {
let svg = '';
for (const rt of reboots) {
const x = sx(rt);
if (x >= pad.left && x <= w - pad.right) {
svg += `<line x1="${x.toFixed(1)}" y1="${pad.top}" x2="${x.toFixed(1)}" y2="${(h - pad.bottom).toFixed(1)}" stroke="var(--text-muted)" stroke-width="0.5" stroke-dasharray="3,3" opacity="0.6"/>`;
svg += `<text x="${(x + 2).toFixed(1)}" y="${(pad.top + 8).toFixed(1)}" font-size="7" fill="var(--text-muted)" opacity="0.7">reboot</text>`;
}
}
return svg;
}

// Shared helper: render X-axis time labels
function rfXAxisLabels(data, sx, h, pad) {
let svg = '';
const xTicks = Math.min(6, data.length);
for (let i = 0; i < xTicks; i++) {
const idx = Math.floor(i * (data.length - 1) / Math.max(xTicks - 1, 1));
const t = new Date(data[idx].t);
const x = sx(t.getTime());
const label = t.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
svg += `<text x="${x.toFixed(1)}" y="${h - 5}" text-anchor="middle" font-size="9" fill="var(--text-muted)">${label}</text>`;
}
return svg;
}

// Shared: build polyline points string from data, skip nulls (break line)
// Airtime chart: TX (red/orange) + RX (blue) lines, Y 0-100%
function rfAirtimeChart(txData, rxData, w, h, reboots, sharedMinT, sharedMaxT) {
const pad = { top: 20, right: 50, bottom: 30, left: 55 };
const cw = w - pad.left - pad.right;
const ch = h - pad.top - pad.bottom;
const minT = sharedMinT, maxT = sharedMaxT;
const rangeT = maxT - minT || 1;

const sx = t => pad.left + ((t - minT) / rangeT) * cw;
const sy = v => pad.top + ch - (v / 100) * ch; // 0-100%

let svg = `<svg viewBox="0 0 ${w} ${h}" style="width:100%;max-height:${h}px" role="img" aria-label="Airtime chart"><title>Airtime %</title>`;

// Chart title
svg += `<text x="${pad.left}" y="12" font-size="10" fill="var(--text-muted)" font-weight="600">Airtime %</text>`;

// Y-axis: 0, 25, 50, 75, 100
for (let pct = 0; pct <= 100; pct += 25) {
const y = sy(pct);
svg += `<text x="${pad.left - 4}" y="${(y + 3).toFixed(1)}" text-anchor="end" font-size="9" fill="var(--text-muted)">${pct}</text>`;
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--border)" stroke-width="0.3"/>`;
}

// Reboot markers
svg += rfRebootMarkers(reboots, sx, pad, h, w);

// TX line (red/orange)
if (txData.length > 1) {
const txPts = txData.map(d => `${sx(new Date(d.t).getTime()).toFixed(1)},${sy(d.v).toFixed(1)}`).join(' ');
svg += `<polyline points="${txPts}" fill="none" stroke="var(--danger, #e74c3c)" stroke-width="1.5"/>`;
// Direct label at last point
const lastTx = txData[txData.length - 1];
const lx = sx(new Date(lastTx.t).getTime());
const ly = sy(lastTx.v);
// Offset label up if RX label would overlap (within 12px)
const lastRx = rxData.length > 1 ? rxData[rxData.length - 1] : null;
const rxLy = lastRx ? sy(lastRx.v) : Infinity;
const txLabelY = (Math.abs(ly - rxLy) < 12) ? ly - 8 : ly + 3;
svg += `<text x="${(lx + 4).toFixed(1)}" y="${txLabelY.toFixed(1)}" font-size="9" fill="var(--danger, #e74c3c)">TX ${lastTx.v.toFixed(1)}%</text>`;
}

// RX line (blue)
if (rxData.length > 1) {
const rxPts = rxData.map(d => `${sx(new Date(d.t).getTime()).toFixed(1)},${sy(d.v).toFixed(1)}`).join(' ');
svg += `<polyline points="${rxPts}" fill="none" stroke="var(--info, #3498db)" stroke-width="1.5"/>`;
// Direct label at last point
const lastRx = rxData[rxData.length - 1];
const lx = sx(new Date(lastRx.t).getTime());
const ly = sy(lastRx.v);
// Offset label down if TX label is nearby
const lastTx = txData.length > 1 ? txData[txData.length - 1] : null;
const txLy = lastTx ? sy(lastTx.v) : -Infinity;
const rxLabelY = (Math.abs(ly - txLy) < 12) ? ly + 12 : ly + 3;
svg += `<text x="${(lx + 4).toFixed(1)}" y="${rxLabelY.toFixed(1)}" font-size="9" fill="var(--info, #3498db)">RX ${lastRx.v.toFixed(1)}%</text>`;
}

// X-axis labels
const allData = txData.length >= rxData.length ? txData : rxData;
svg += rfXAxisLabels(allData, sx, h, pad);

svg += '</svg>';
return svg;
}

// Error rate chart: recv_error_rate line
function rfErrorRateChart(errData, w, h, reboots, sharedMinT, sharedMaxT) {
const pad = { top: 20, right: 50, bottom: 30, left: 55 };
const cw = w - pad.left - pad.right;
const ch = h - pad.top - pad.bottom;
const minT = sharedMinT, maxT = sharedMaxT;
const rangeT = maxT - minT || 1;

const values = errData.map(d => d.v);
const maxV = Math.max(...values, 1); // at least 1% scale
const rangeV = maxV || 1;

const sx = t => pad.left + ((t - minT) / rangeT) * cw;
const sy = v => pad.top + ch - (v / rangeV) * ch;

let svg = `<svg viewBox="0 0 ${w} ${h}" style="width:100%;max-height:${h}px" role="img" aria-label="Error rate chart"><title>Error Rate</title>`;

// Chart title
svg += `<text x="${pad.left}" y="12" font-size="10" fill="var(--text-muted)" font-weight="600">Error Rate %</text>`;

// Y-axis
const yTicks = 4;
for (let i = 0; i <= yTicks; i++) {
const v = (rangeV * i / yTicks);
const y = sy(v);
svg += `<text x="${pad.left - 4}" y="${(y + 3).toFixed(1)}" text-anchor="end" font-size="9" fill="var(--text-muted)">${v.toFixed(1)}</text>`;
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--border)" stroke-width="0.3"/>`;
}

// Reboot markers
svg += rfRebootMarkers(reboots, sx, pad, h, w);

// Error rate line
const pts = errData.map(d => `${sx(new Date(d.t).getTime()).toFixed(1)},${sy(d.v).toFixed(1)}`).join(' ');
svg += `<polyline points="${pts}" fill="none" stroke="var(--warning, #f39c12)" stroke-width="1.5"/>`;

// Direct label at last point
const last = errData[errData.length - 1];
const lx = sx(new Date(last.t).getTime());
const ly = sy(last.v);
svg += `<text x="${(lx + 4).toFixed(1)}" y="${(ly + 3).toFixed(1)}" font-size="9" fill="var(--warning, #f39c12)">${last.v.toFixed(2)}%</text>`;

// X-axis labels
svg += rfXAxisLabels(errData, sx, h, pad);

svg += '</svg>';
return svg;
}

// Battery voltage chart
function rfBatteryChart(battData, w, h, reboots, sharedMinT, sharedMaxT) {
const pad = { top: 20, right: 50, bottom: 30, left: 55 };
const cw = w - pad.left - pad.right;
const ch = h - pad.top - pad.bottom;
const minT = sharedMinT, maxT = sharedMaxT;
const rangeT = maxT - minT || 1;

const values = battData.map(d => d.v);
const minV = Math.min(...values);
const maxV = Math.max(...values);
const rangeV = maxV - minV || 100; // at least 100mV range

const sx = t => pad.left + ((t - minT) / rangeT) * cw;
const sy = v => pad.top + ch - ((v - minV) / rangeV) * ch;

let svg = `<svg viewBox="0 0 ${w} ${h}" style="width:100%;max-height:${h}px" role="img" aria-label="Battery voltage chart"><title>Battery</title>`;

// Chart title
svg += `<text x="${pad.left}" y="12" font-size="10" fill="var(--text-muted)" font-weight="600">Battery</text>`;

// Y-axis (in volts)
const yTicks = 4;
for (let i = 0; i <= yTicks; i++) {
const v = minV + (rangeV * i / yTicks);
const y = sy(v);
svg += `<text x="${pad.left - 4}" y="${(y + 3).toFixed(1)}" text-anchor="end" font-size="9" fill="var(--text-muted)">${(v/1000).toFixed(2)}V</text>`;
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--border)" stroke-width="0.3"/>`;
}

// Low battery reference line at 3.3V
const lowBattMv = 3300;
if (lowBattMv >= minV && lowBattMv <= maxV) {
const y = sy(lowBattMv);
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--warning, #f39c12)" stroke-width="0.5" stroke-dasharray="4,2"/>`;
svg += `<text x="${w - pad.right + 2}" y="${(y + 3).toFixed(1)}" font-size="8" fill="var(--warning, #f39c12)">3.3V low</text>`;
}

// Reboot markers
svg += rfRebootMarkers(reboots, sx, pad, h, w);

// Battery line
const pts = battData.map(d => `${sx(new Date(d.t).getTime()).toFixed(1)},${sy(d.v).toFixed(1)}`).join(' ');
svg += `<polyline points="${pts}" fill="none" stroke="var(--success, #27ae60)" stroke-width="1.5"/>`;

// Direct label at last point
const last = battData[battData.length - 1];
const lx = sx(new Date(last.t).getTime());
const ly = sy(last.v);
svg += `<text x="${(lx + 4).toFixed(1)}" y="${(ly + 3).toFixed(1)}" font-size="9" fill="var(--success, #27ae60)">${(last.v/1000).toFixed(2)}V</text>`;

// X-axis labels
svg += rfXAxisLabels(battData, sx, h, pad);

svg += '</svg>';
return svg;
}

function rfNFLineChart(data, w, h, reboots, sharedMinT, sharedMaxT) {
|
||||
reboots = reboots || [];
|
||||
const pad = { top: 20, right: 40, bottom: 30, left: 55 };
|
||||
const cw = w - pad.left - pad.right;
|
||||
const ch = h - pad.top - pad.bottom;
|
||||
|
||||
const values = data.map(d => d.v);
|
||||
const minT = sharedMinT != null ? sharedMinT : Math.min(...data.map(d => new Date(d.t).getTime()));
|
||||
const maxT = sharedMaxT != null ? sharedMaxT : Math.max(...data.map(d => new Date(d.t).getTime()));
|
||||
const minV = Math.min(...values);
|
||||
const maxV = Math.max(...values);
|
||||
const rangeV = maxV - minV || 1;
|
||||
const rangeT = maxT - minT || 1;
|
||||
|
||||
const sx = t => pad.left + ((t - minT) / rangeT) * cw;
|
||||
const sy = v => pad.top + ch - ((v - minV) / rangeV) * ch;
|
||||
|
||||
const pts = data.map(d => `${sx(new Date(d.t).getTime()).toFixed(1)},${sy(d.v).toFixed(1)}`).join(' ');
|
||||
|
||||
let svg = `<svg viewBox="0 0 ${w} ${h}" style="width:100%;max-height:${h}px" role="img" aria-label="Noise floor line chart"><title>Noise floor over time</title>`;
|
||||
|
||||
// Chart title
|
||||
svg += `<text x="${pad.left}" y="12" font-size="10" fill="var(--text-muted)" font-weight="600">Noise Floor dBm</text>`;
|
||||
|
||||
// Reference lines
|
||||
const refLines = [-100, -85];
|
||||
const refLabels = ['-100 warning', '-85 critical'];
|
||||
refLines.forEach((ref, i) => {
|
||||
if (ref >= minV && ref <= maxV) {
|
||||
const y = sy(ref);
|
||||
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--text-muted)" stroke-width="0.5" stroke-dasharray="4,2"/>`;
|
||||
svg += `<text x="${w - pad.right + 2}" y="${(y + 3).toFixed(1)}" font-size="9" fill="var(--text-muted)">${refLabels[i]}</text>`;
|
||||
}
|
||||
});
|
||||
|
||||
// Y-axis labels
|
||||
const yTicks = 5;
|
||||
for (let i = 0; i <= yTicks; i++) {
|
||||
const v = minV + (rangeV * i / yTicks);
|
||||
const y = sy(v);
|
||||
svg += `<text x="${pad.left - 4}" y="${(y + 3).toFixed(1)}" text-anchor="end" font-size="9" fill="var(--text-muted)">${v.toFixed(0)}</text>`;
|
||||
svg += `<line x1="${pad.left}" y1="${y.toFixed(1)}" x2="${w - pad.right}" y2="${y.toFixed(1)}" stroke="var(--border)" stroke-width="0.3"/>`;
|
||||
}
|
||||
|
||||
// Reboot markers
|
||||
svg += rfRebootMarkers(reboots, sx, pad, h, w);
|
||||
|
||||
// X-axis labels
|
||||
svg += rfXAxisLabels(data, sx, h, pad);
|
||||
|
||||
// Data polyline
|
||||
svg += `<polyline points="${pts}" fill="none" stroke="var(--accent)" stroke-width="1.5"/>`;
|
||||
|
||||
// Direct labels: min and max points
|
||||
const times = data.map(d => new Date(d.t).getTime());
|
||||
const maxIdx = values.indexOf(maxV);
|
||||
const minIdx = values.indexOf(minV);
|
||||
svg += `<circle cx="${sx(times[maxIdx]).toFixed(1)}" cy="${sy(maxV).toFixed(1)}" r="3" fill="var(--danger, red)"/>`;
|
||||
svg += `<text x="${sx(times[maxIdx]).toFixed(1)}" y="${(sy(maxV) - 6).toFixed(1)}" text-anchor="middle" font-size="9" fill="var(--danger, red)">${maxV.toFixed(1)}</text>`;
|
||||
svg += `<circle cx="${sx(times[minIdx]).toFixed(1)}" cy="${sy(minV).toFixed(1)}" r="3" fill="var(--success, green)"/>`;
|
||||
svg += `<text x="${sx(times[minIdx]).toFixed(1)}" y="${(sy(minV) + 14).toFixed(1)}" text-anchor="middle" font-size="9" fill="var(--success, green)">${minV.toFixed(1)}</text>`;
|
||||
|
||||
// Y-axis label
|
||||
svg += `<text x="12" y="${(h / 2)}" text-anchor="middle" font-size="10" fill="var(--text-muted)" transform="rotate(-90,12,${h/2})">dBm</text>`;
|
||||
|
||||
svg += '</svg>';
|
||||
return svg;
|
||||
}
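
// Illustrative usage sketch (sample readings and element id are made up, not part of the
// change set): both chart builders take [{t: ISO-8601 timestamp, v: number}] samples and
// return an SVG string, so a caller can inject the result directly:
//   var samples = [{ t: '2025-01-01T00:00:00Z', v: -104 }, { t: '2025-01-01T06:00:00Z', v: -91 }];
//   document.getElementById('nfChart').innerHTML = rfNFLineChart(samples, 600, 160, []);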

registerPage('analytics', { init, destroy });
})();

@@ -463,6 +463,9 @@ function navigate() {
currentPage = basePage;

const app = document.getElementById('app');
// Pages with fixed-height containers (maps, virtual-scroll, split-panels)
const fixedPages = { packets: 1, nodes: 1, map: 1, live: 1, channels: 1, 'audio-lab': 1 };
app.classList.toggle('app-fixed', basePage in fixedPages);
if (pages[basePage]?.init) {
const t0 = performance.now();
pages[basePage].init(app, routeParam);

@@ -0,0 +1,109 @@
/**
 * Channel Color Highlighting — Storage Model (M1)
 *
 * localStorage key: 'live-channel-colors'
 * Value: JSON object mapping channel names to hex colors
 * e.g. { "#wardriving": "#ef4444", "#meshnet": "#3b82f6" }
 *
 * Only applies to GRP_TXT packets. Other types retain default styling.
 */
(function() {
'use strict';

var STORAGE_KEY = 'live-channel-colors';

function _load() {
try {
return JSON.parse(localStorage.getItem(STORAGE_KEY)) || {};
} catch (e) {
return {};
}
}

function _save(colors) {
localStorage.setItem(STORAGE_KEY, JSON.stringify(colors));
}

/** Validate hex color format: #RGB or #RRGGBB */
var HEX_RE = /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$/;
function _isValidHex(color) {
return typeof color === 'string' && HEX_RE.test(color);
}

/** Normalize 3-digit hex to 6-digit: #abc → #aabbcc */
function _normalize(color) {
if (color.length === 4) {
return '#' + color[1] + color[1] + color[2] + color[2] + color[3] + color[3];
}
return color;
}

/**
 * Get the assigned color for a channel, or null if unassigned.
 * @param {string} channel - Channel name (e.g. "#test")
 * @returns {string|null} Hex color or null
 */
function getChannelColor(channel) {
if (!channel) return null;
var colors = _load();
return colors[channel] || null;
}

/**
 * Assign a color to a channel.
 * @param {string} channel - Channel name
 * @param {string} color - Hex color (e.g. "#ef4444")
 */
function setChannelColor(channel, color) {
if (!channel || !color) return;
if (!_isValidHex(color)) return;
var colors = _load();
colors[channel] = _normalize(color);
_save(colors);
}

/**
 * Remove the color assignment for a channel.
 * @param {string} channel - Channel name
 */
function removeChannelColor(channel) {
if (!channel) return;
var colors = _load();
delete colors[channel];
_save(colors);
}

/**
 * Get all channel-color assignments.
 * @returns {Object} Map of channel name → hex color
 */
function getAllChannelColors() {
return _load();
}

/**
 * Compute inline style string for a feed row / table row based on channel color.
 * Returns empty string if no channel color is assigned.
 * @param {string} typeName - Packet type name (e.g. "GRP_TXT", "CHAN")
 * @param {string|null} channel - Channel name from decoded payload
 * @returns {string} Inline style string or empty
 */
function getChannelRowStyle(typeName, channel) {
// Only GRP_TXT / CHAN packets get channel coloring
if (typeName !== 'GRP_TXT' && typeName !== 'CHAN') return '';
if (!channel) return '';
var color = getChannelColor(channel);
if (!color) return '';
// 4px left border + 10% opacity background tint
return 'border-left:4px solid ' + color + ';background:' + color + '1a;';
}
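// Worked example: with '#ef4444' assigned to '#wardriving', getChannelRowStyle('GRP_TXT', '#wardriving')
// returns 'border-left:4px solid #ef4444;background:#ef44441a;'. The appended '1a' is ~10% alpha in
// 8-digit hex notation, which is why _normalize() expands 3-digit colors before storage.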

// Export to window for use by live.js and packets.js
window.ChannelColors = {
get: getChannelColor,
set: setChannelColor,
remove: removeChannelColor,
getAll: getAllChannelColors,
getRowStyle: getChannelRowStyle
};
})();
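
// Illustrative usage sketch of the exported API (channel name and color are sample values):
//   window.ChannelColors.set('#wardriving', '#f00');            // stored normalized as '#ff0000'
//   window.ChannelColors.get('#wardriving');                    // → '#ff0000'
//   window.ChannelColors.getRowStyle('GRP_TXT', '#wardriving'); // left border + background tint
//   window.ChannelColors.remove('#wardriving');                 // get() returns null again
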
+1
-1
@@ -48,7 +48,7 @@ if (typeof window !== 'undefined') window.comparePacketSets = comparePacketSets;
packetsB = [];
currentView = 'summary';

app.innerHTML = '<div class="compare-page" style="overflow-y:auto;height:calc(100vh - 56px);padding:16px">' +
app.innerHTML = '<div class="compare-page" style="padding:16px">' +
'<div class="page-header" style="display:flex;align-items:center;gap:12px;margin-bottom:16px">' +
'<a href="#/observers" class="btn-icon" title="Back to Observers" aria-label="Back">\u2190</a>' +
'<h2 style="margin:0">\uD83D\uDD0D Observer Comparison</h2>' +

+1
-2
@@ -1,7 +1,6 @@
/* === CoreScope — home.css === */

/* Override #app overflow:hidden for home page scrolling */
#app:has(.home-hero), #app:has(.home-chooser) { overflow-y: auto; }
/* Home page now uses body scroll (no #app override needed — see style.css) */

/* Chooser */
.home-chooser {

+107
-17
@@ -8,9 +8,11 @@ window.HopResolver = (function() {
const MAX_HOP_DIST = 1.8; // ~200km in degrees
const REGION_RADIUS_KM = 300;
let prefixIdx = {}; // lowercase hex prefix → [node, ...]
let pubkeyIdx = {}; // full lowercase pubkey → node (O(1) lookup)
let nodesList = [];
let observerIataMap = {}; // observer_id → iata
let iataCoords = {}; // iata → {lat, lon}
let affinityMap = {}; // pubkey → { neighborPubkey → score }

function dist(lat1, lon1, lat2, lon2) {
return Math.sqrt((lat1 - lat2) ** 2 + (lon1 - lon2) ** 2);
@@ -34,9 +36,11 @@ window.HopResolver = (function() {
function init(nodes, opts) {
nodesList = nodes || [];
prefixIdx = {};
pubkeyIdx = {};
for (const n of nodesList) {
if (!n.public_key) continue;
const pk = n.public_key.toLowerCase();
pubkeyIdx[pk] = n;
for (let len = 1; len <= 3; len++) {
const p = pk.slice(0, len * 2);
if (!prefixIdx[p]) prefixIdx[p] = [];
@@ -67,6 +71,34 @@ window.HopResolver = (function() {
return null; // no GPS — can't geo-filter client-side
}

/**
 * Pick the best candidate using affinity first, then geo-distance fallback.
 * @param {Array} candidates - candidates with lat/lon/pubkey/name
 * @param {string|null} adjacentPubkey - pubkey of the previously/next resolved hop
 * @param {Object|null} anchor - {lat, lon} for geo fallback
 * @param {number|null} fallbackLat - fallback anchor lat (e.g. observer)
 * @param {number|null} fallbackLon - fallback anchor lon
 * @returns {Object} best candidate
 */
function pickByAffinity(candidates, adjacentPubkey, anchor, fallbackLat, fallbackLon) {
// If we have affinity data and an adjacent hop, prefer neighbors
if (adjacentPubkey && Object.keys(affinityMap).length > 0) {
const withAffinity = candidates
.map(c => ({ ...c, affinity: getAffinity(adjacentPubkey, c.pubkey) }))
.filter(c => c.affinity > 0);
if (withAffinity.length > 0) {
withAffinity.sort((a, b) => b.affinity - a.affinity);
return withAffinity[0];
}
}
// Fallback: geo-distance sort (existing behavior)
const effectiveAnchor = anchor || (fallbackLat != null ? { lat: fallbackLat, lon: fallbackLon } : null);
if (effectiveAnchor) {
candidates.sort((a, b) => dist(a.lat, a.lon, effectiveAnchor.lat, effectiveAnchor.lon) - dist(b.lat, b.lon, effectiveAnchor.lat, effectiveAnchor.lon));
}
return candidates[0];
}

/**
 * Resolve an array of hex hop prefixes to node info.
 * Returns a map: { hop: {name, pubkey, lat, lon, ambiguous, unreliable} }
@@ -139,40 +171,50 @@ window.HopResolver = (function() {

// Forward pass
let lastPos = (originLat != null && originLon != null) ? { lat: originLat, lon: originLon } : null;
let lastResolvedPubkey = null;
for (let i = 0; i < hops.length; i++) {
const hop = hops[i];
if (hopPositions[hop]) { lastPos = hopPositions[hop]; continue; }
if (hopPositions[hop]) {
lastPos = hopPositions[hop];
lastResolvedPubkey = resolved[hop] ? resolved[hop].pubkey : null;
continue;
}
const r = resolved[hop];
if (!r || !r.ambiguous) continue;
const withLoc = r.candidates.filter(c => c.lat && c.lon && !(c.lat === 0 && c.lon === 0));
if (!withLoc.length) continue;
let anchor = lastPos;
if (!anchor && i === hops.length - 1 && observerLat != null) {
anchor = { lat: observerLat, lon: observerLon };
}
if (anchor) {
withLoc.sort((a, b) => dist(a.lat, a.lon, anchor.lat, anchor.lon) - dist(b.lat, b.lon, anchor.lat, anchor.lon));
}
r.name = withLoc[0].name;
r.pubkey = withLoc[0].pubkey;
hopPositions[hop] = { lat: withLoc[0].lat, lon: withLoc[0].lon };

// Affinity-aware: prefer candidates that are neighbors of the previous hop
const picked = pickByAffinity(withLoc, lastResolvedPubkey, lastPos, i === hops.length - 1 ? observerLat : null, i === hops.length - 1 ? observerLon : null);
r.name = picked.name;
r.pubkey = picked.pubkey;
hopPositions[hop] = { lat: picked.lat, lon: picked.lon };
lastPos = hopPositions[hop];
lastResolvedPubkey = picked.pubkey;
}

// Backward pass
let nextPos = (observerLat != null && observerLon != null) ? { lat: observerLat, lon: observerLon } : null;
let nextResolvedPubkey = null;
for (let i = hops.length - 1; i >= 0; i--) {
const hop = hops[i];
if (hopPositions[hop]) { nextPos = hopPositions[hop]; continue; }
if (hopPositions[hop]) {
nextPos = hopPositions[hop];
nextResolvedPubkey = resolved[hop] ? resolved[hop].pubkey : null;
continue;
}
const r = resolved[hop];
if (!r || !r.ambiguous) continue;
const withLoc = r.candidates.filter(c => c.lat && c.lon && !(c.lat === 0 && c.lon === 0));
if (!withLoc.length || !nextPos) continue;
withLoc.sort((a, b) => dist(a.lat, a.lon, nextPos.lat, nextPos.lon) - dist(b.lat, b.lon, nextPos.lat, nextPos.lon));
r.name = withLoc[0].name;
r.pubkey = withLoc[0].pubkey;
hopPositions[hop] = { lat: withLoc[0].lat, lon: withLoc[0].lon };

// Affinity-aware: prefer candidates that are neighbors of the next hop
const picked = pickByAffinity(withLoc, nextResolvedPubkey, nextPos, null, null);
r.name = picked.name;
r.pubkey = picked.pubkey;
hopPositions[hop] = { lat: picked.lat, lon: picked.lon };
nextPos = hopPositions[hop];
nextResolvedPubkey = picked.pubkey;
}

// Sanity check: drop hops impossibly far from neighbors
@@ -203,5 +245,53 @@ window.HopResolver = (function() {
return nodesList.length > 0;
}

return { init: init, resolve: resolve, ready: ready, haversineKm: haversineKm };
/**
 * Load neighbor-graph affinity data.
 * @param {Object} graph - { edges: [{source, target, score, weight}, ...] }
 */
function setAffinity(graph) {
affinityMap = {};
if (!graph || !graph.edges) return;
for (const e of graph.edges) {
if (!affinityMap[e.source]) affinityMap[e.source] = {};
affinityMap[e.source][e.target] = e.score || e.weight || 1;
if (!affinityMap[e.target]) affinityMap[e.target] = {};
affinityMap[e.target][e.source] = e.score || e.weight || 1;
}
}
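
// Worked example (hypothetical pubkeys): setAffinity({ edges: [{ source: 'aa…', target: 'bb…', score: 5 }] })
// yields a symmetric map: affinityMap['aa…']['bb…'] === 5 and affinityMap['bb…']['aa…'] === 5,
// so getAffinity() below works no matter which endpoint the graph listed as the edge's source.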

/**
 * Get the affinity score between two pubkeys (0 if not neighbors).
 */
function getAffinity(pubkeyA, pubkeyB) {
if (!pubkeyA || !pubkeyB || !affinityMap[pubkeyA]) return 0;
return affinityMap[pubkeyA][pubkeyB] || 0;
}

/**
 * Resolve hops using server-provided resolved_path (full pubkeys).
 * Returns the same format as resolve() — { [hop]: { name, pubkey, ... } }.
 * resolved_path is an array aligned with path_json: each element is a
 * 64-char lowercase hex pubkey or null. Skips entries that are null.
 */
function resolveFromServer(hops, resolvedPath) {
if (!hops || !resolvedPath || hops.length !== resolvedPath.length) return {};
var result = {};
for (var i = 0; i < hops.length; i++) {
var hop = hops[i];
var pubkey = resolvedPath[i];
if (!pubkey) continue; // null = unresolved, leave for client-side fallback
// O(1) lookup via pubkeyIdx built during init()
var node = pubkeyIdx[pubkey.toLowerCase()] || null;
result[hop] = {
name: node ? node.name : pubkey.slice(0, 8),
pubkey: pubkey,
candidates: node ? [{ name: node.name, pubkey: pubkey, lat: node.lat, lon: node.lon }] : [],
conflicts: []
};
}
return result;
}

return { init: init, resolve: resolve, resolveFromServer: resolveFromServer, ready: ready, haversineKm: haversineKm, setAffinity: setAffinity, getAffinity: getAffinity };
})();
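
// Illustrative usage sketch (node, edge, and path values are made up for the example):
//   HopResolver.init([{ public_key: 'AB'.repeat(32), name: 'Relay-1', lat: 52.1, lon: 4.3 }]);
//   HopResolver.setAffinity({ edges: [{ source: 'ab'.repeat(32), target: 'cd'.repeat(32), score: 3 }] });
//   HopResolver.resolveFromServer(['ab'], ['ab'.repeat(32)]);
//   // → { ab: { name: 'Relay-1', pubkey: 'abab…', candidates: [...], conflicts: [] } }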

@@ -94,6 +94,7 @@
<script src="home.js?v=__BUST__"></script>
<script src="packet-filter.js?v=__BUST__"></script>
<script src="packet-helpers.js?v=__BUST__"></script>
<script src="channel-colors.js?v=__BUST__"></script>
<script src="packets.js?v=__BUST__"></script>
<script src="geo-filter-overlay.js?v=__BUST__"></script>
<script src="map.js?v=__BUST__" onerror="console.error('Failed to load:', this.src)"></script>

+20
-2
@@ -201,6 +201,15 @@
display: flex;
flex-direction: column;
gap: 3px;
transition: opacity 0.3s, transform 0.3s;
}

/* Collapsible legend (#279) */
.live-legend.hidden {
opacity: 0;
transform: translateX(100%);
pointer-events: none;
visibility: hidden;
}

.legend-title {
@@ -272,6 +281,16 @@
background: rgba(59, 130, 246, 0.2) !important;
}

/* ---- Medium breakpoint (#279) ---- */
@media (max-width: 768px) {
.live-feed { width: 280px; max-height: 200px; }
.live-node-detail { width: 260px; }
.live-legend { font-size: 10px; padding: 8px 10px; }
.live-header { gap: 8px; padding: 6px 12px; }
.live-stat-pill { font-size: 11px; padding: 2px 8px; }
.live-toggles { font-size: 10px; gap: 6px; }
}

/* ---- Responsive ---- */
@media (max-width: 640px) {
.live-feed { display: none !important; }
@@ -702,9 +721,8 @@
border: 0;
}

/* Legend toggle button for mobile (#60) */
/* Legend toggle button — visible at all sizes (#60, #279) */
.legend-toggle-btn {
display: none;
position: absolute;
bottom: 82px;
right: 12px;

+159
-41
@@ -43,6 +43,7 @@
timelineScope: 3600000, // 1h default ms
timelineTimestamps: [], // historical timestamps from DB for sparkline
timelineFetchedScope: 0, // last fetched scope to avoid redundant fetches
replayGen: 0, // generation counter — incremented on each replay/rewind to discard stale async results
};

// ROLE_COLORS loaded from shared roles.js (includes 'unknown')
@@ -116,6 +117,7 @@

function vcrResumeLive() {
stopReplay();
VCR.replayGen++; // invalidate any in-flight async chunk processing
VCR.playhead = -1;
VCR.speed = 1;
VCR.missedCount = 0;
@@ -142,6 +144,8 @@
function vcrReplayFromTs(targetTs) {
const fetchFrom = new Date(targetTs).toISOString();
stopReplay();
VCR.replayGen++;
var gen = VCR.replayGen;
vcrSetMode('REPLAY');

// Reload map nodes to match the replay time
@@ -153,7 +157,10 @@
.then(r => r.json())
.then(data => {
const pkts = data.packets || [];
const replayEntries = expandToBufferEntries(pkts);
return expandToBufferEntriesAsync(pkts);
})
.then(function(replayEntries) {
if (gen !== VCR.replayGen) return; // stale async result — user changed mode
if (replayEntries.length === 0) {
vcrSetMode('PAUSED');
return;
@@ -202,6 +209,8 @@

function vcrRewind(ms) {
stopReplay();
VCR.replayGen++;
var gen = VCR.replayGen;
// Fetch packets from DB for the time window
const now = Date.now();
const from = new Date(now - ms).toISOString();
@@ -212,8 +221,11 @@
// Prepend to buffer (avoid duplicates by ID)
const existingIds = new Set(VCR.buffer.map(b => b.pkt.id).filter(Boolean));
const filtered = pkts.filter(p => !existingIds.has(p.id));
const newEntries = expandToBufferEntries(filtered);
VCR.buffer = [...newEntries, ...VCR.buffer];
return expandToBufferEntriesAsync(filtered);
})
.then(function(newEntries) {
if (gen !== VCR.replayGen) return; // stale async result
VCR.buffer = [].concat(newEntries, VCR.buffer);
VCR.playhead = 0;
VCR.speed = 1;
vcrSetMode('REPLAY');
@@ -274,15 +286,18 @@
// Get timestamp of last packet in buffer to fetch the next page
const last = VCR.buffer[VCR.buffer.length - 1];
if (!last) return Promise.resolve(false);
var gen = VCR.replayGen;
const since = new Date(last.ts + 1).toISOString(); // +1ms to avoid dupe
return fetch(`/api/packets?limit=10000&grouped=false&expand=observations&since=${encodeURIComponent(since)}&order=asc`)
.then(r => r.json())
.then(data => {
const pkts = data.packets || [];
if (pkts.length === 0) return false;
const newEntries = expandToBufferEntries(pkts);
VCR.buffer = VCR.buffer.concat(newEntries);
return true;
return expandToBufferEntriesAsync(pkts).then(function(newEntries) {
if (gen !== VCR.replayGen) return false; // stale
VCR.buffer = VCR.buffer.concat(newEntries);
return true;
});
})
.catch(() => false);
}
@@ -442,6 +457,7 @@
id: pkt.id, hash: pkt.hash,
raw: pkt.raw_hex,
path_json: pkt.path_json,
resolved_path: pkt.resolved_path,
_ts: new Date(pkt.timestamp || pkt.created_at).getTime(),
decoded: { header: { payloadTypeName: typeName }, payload: raw, path: { hops } },
snr: pkt.snr, rssi: pkt.rssi, observer: pkt.observer_name
@@ -449,11 +465,53 @@
}

// Expand a DB packet (with optional observations[]) into VCR buffer entries
/**
 * Process packets into buffer entries in chunks to avoid blocking the main thread.
 * Returns a Promise that resolves with the entries array.
 * Each chunk processes CHUNK_SIZE packets, then yields to the event loop via setTimeout(0).
 */
var VCR_CHUNK_SIZE = 200;
function expandToBufferEntriesAsync(pkts) {
return new Promise(function(resolve) {
var entries = [];
var i = 0;
function processChunk() {
var end = Math.min(i + VCR_CHUNK_SIZE, pkts.length);
for (; i < end; i++) {
var p = pkts[i];
if (p.observations && p.observations.length > 0) {
for (var j = 0; j < p.observations.length; j++) {
var obs = p.observations[j];
entries.push({
ts: new Date(obs.timestamp || p.timestamp || p.created_at).getTime(),
pkt: dbPacketToLive(Object.assign({}, p, obs, { hash: p.hash, raw_hex: p.raw_hex, decoded_json: p.decoded_json }))
});
}
} else {
entries.push({
ts: new Date(p.timestamp || p.created_at).getTime(),
pkt: dbPacketToLive(p)
});
}
}
if (i < pkts.length) {
setTimeout(processChunk, 0);
} else {
resolve(entries);
}
}
processChunk();
});
}

// Synchronous version kept for small datasets and backward compat (tests)
function expandToBufferEntries(pkts) {
const entries = [];
for (const p of pkts) {
var entries = [];
for (var k = 0; k < pkts.length; k++) {
var p = pkts[k];
if (p.observations && p.observations.length > 0) {
for (const obs of p.observations) {
for (var j = 0; j < p.observations.length; j++) {
var obs = p.observations[j];
entries.push({
ts: new Date(obs.timestamp || p.timestamp || p.created_at).getTime(),
pkt: dbPacketToLive(Object.assign({}, p, obs, { hash: p.hash, raw_hex: p.raw_hex, decoded_json: p.decoded_json }))
@@ -482,6 +540,8 @@
clearTimeout(entry.timer);
}
propagationBuffer.clear();
// Batch-update timeline once on restore instead of per-packet while hidden
updateTimeline();
}
});

@@ -506,7 +566,6 @@
if (VCR.mode === 'LIVE') {
// Skip animations when tab is backgrounded — just buffer for VCR timeline
if (_tabHidden) {
updateTimeline();
return;
}
if (realisticPropagation && pkt.hash) {
@@ -703,7 +762,7 @@
<button class="feed-hide-btn" id="nodeDetailClose" title="Close">✕</button>
<div id="nodeDetailContent"></div>
</div>
<button class="legend-toggle-btn hidden" id="legendToggleBtn" aria-label="Show legend" title="Show legend">🎨</button>
<button class="legend-toggle-btn" id="legendToggleBtn" aria-label="Show legend" title="Show legend">🎨</button>
<div class="live-overlay live-legend" id="liveLegend" role="region" aria-label="Map legend">
<h3 class="legend-title">PACKET TYPES</h3>
<ul class="legend-list">
@@ -984,10 +1043,19 @@
const legendEl = document.getElementById('liveLegend');
const legendToggleBtn = document.getElementById('legendToggleBtn');
if (legendToggleBtn && legendEl) {
// Restore legend collapsed state from localStorage (#279)
try {
if (localStorage.getItem('live-legend-hidden') === 'true') {
legendEl.classList.add('hidden');
legendToggleBtn.setAttribute('aria-label', 'Show legend');
legendToggleBtn.textContent = '🎨';
}
} catch (_) { /* private browsing / storage disabled */ }
legendToggleBtn.addEventListener('click', () => {
const isVisible = legendEl.classList.toggle('legend-mobile-visible');
legendToggleBtn.setAttribute('aria-label', isVisible ? 'Hide legend' : 'Show legend');
legendToggleBtn.textContent = isVisible ? '✕' : '🎨';
const nowHidden = legendEl.classList.toggle('hidden');
legendToggleBtn.setAttribute('aria-label', nowHidden ? 'Show legend' : 'Hide legend');
legendToggleBtn.textContent = nowHidden ? '🎨' : '✕';
try { localStorage.setItem('live-legend-hidden', String(nowHidden)); } catch (_) { /* ignore */ }
});
}

@@ -1286,7 +1354,7 @@
html += `<h4 style="font-size:12px;margin:12px 0 6px;color:var(--text-muted);">Recent Packets</h4>
<div style="font-size:11px;max-height:200px;overflow-y:auto;">` +
recent.slice(0, 10).map(p => `<div style="padding:2px 0;display:flex;justify-content:space-between;">
<a href="#/packets/${encodeURIComponent(p.hash || '')}" style="color:var(--accent);text-decoration:none;">${escapeHtml(p.payload_type || '?')}${p.observation_count > 1 ? ' <span class="badge badge-obs" style="font-size:9px">👁 ' + p.observation_count + '</span>' : ''}</a>
<a href="#/packets/${encodeURIComponent(p.hash || '')}" style="color:var(--accent);text-decoration:none;">${escapeHtml(p.payload_type || '?')}${transportBadge(p.route_type)}${p.observation_count > 1 ? ' <span class="badge badge-obs" style="font-size:9px">👁 ' + p.observation_count + '</span>' : ''}</a>
<span style="color:var(--text-muted)">${formatLiveTimestampHtml(p.timestamp)}</span>
</div>`).join('') +
'</div>';
@@ -1359,9 +1427,29 @@
const _el2 = document.getElementById('liveNodeCount'); if (_el2) _el2.textContent = Object.keys(nodeMarkers).length;
// Initialize shared HopResolver with loaded nodes
if (window.HopResolver) HopResolver.init(list);
// Fetch affinity data for hop disambiguation
fetchAffinityData();
startAffinityRefresh();
} catch (e) { console.error('Failed to load nodes:', e); }
}

let _affinityInterval = null;

async function fetchAffinityData() {
try {
const resp = await fetch('/api/analytics/neighbor-graph');
const graph = await resp.json();
if (window.HopResolver && HopResolver.setAffinity) {
HopResolver.setAffinity(graph);
}
} catch (e) { console.warn('Failed to fetch affinity data:', e); }
}

function startAffinityRefresh() {
if (_affinityInterval) clearInterval(_affinityInterval);
_affinityInterval = setInterval(fetchAffinityData, 60000);
}

function clearNodeMarkers() {
if (nodesLayer) nodesLayer.clearLayers();
if (animLayer) animLayer.clearLayers();
@@ -1471,7 +1559,7 @@
item.innerHTML = `
<span class="feed-icon" style="color:${color}">${icon}</span>
<span class="feed-type" style="color:${color}">${typeName}</span>
${hopStr}${obsBadge}
${transportBadge(pkt.route_type)}${hopStr}${obsBadge}
<span class="feed-text">${escapeHtml(preview)}</span>
<span class="feed-time">${formatLiveTimestampHtml(group.latestTs || Date.now())}</span>
`;
@@ -1573,6 +1661,7 @@
}
delete nodeMarkers[key];
delete nodeData[key];
delete nodeActivity[key];
pruned = true;
}
} else if (marker && marker._staleDimmed) {
@@ -1588,15 +1677,21 @@
if (_el2) _el2.textContent = Object.keys(nodeMarkers).length;
if (window.HopResolver) HopResolver.init(Object.values(nodeData));
}
// Prune orphaned nodeActivity entries (nodes removed above or never tracked)
for (var aKey in nodeActivity) {
if (!(aKey in nodeData)) delete nodeActivity[aKey];
}
}

// Expose for testing
window._livePruneStaleNodes = pruneStaleNodes;
window._liveNodeMarkers = function() { return nodeMarkers; };
window._liveNodeData = function() { return nodeData; };
window._liveNodeActivity = function() { return nodeActivity; };
window._vcrFormatTime = vcrFormatTime;
window._liveDbPacketToLive = dbPacketToLive;
window._liveExpandToBufferEntries = expandToBufferEntries;
window._liveExpandToBufferEntriesAsync = expandToBufferEntriesAsync;
window._liveSEG_MAP = SEG_MAP;
window._liveBufferPacket = bufferPacket;
window._liveVCR = function() { return VCR; };
@@ -1612,20 +1707,13 @@

async function replayRecent() {
try {
const resp = await fetch('/api/packets?limit=8&groupByHash=true');
// Single bulk fetch with expand=observations — no N+1 calls
const resp = await fetch('/api/packets?limit=8&expand=observations');
const data = await resp.json();
const groups = (data.packets || []).reverse();

// Fetch all observations first, then stagger rendering
const allGroups = [];
for (let i = 0; i < groups.length; i++) {
const group = groups[i];
let observations = [];
try {
const detail = await fetch('/api/packets/' + encodeURIComponent(group.hash));
const detailData = await detail.json();
observations = detailData.observations || [];
} catch {}
const allGroups = groups.map((group) => {
const observations = group.observations || [];

const livePackets = observations.map(obs => {
const livePkt = dbPacketToLive(Object.assign({}, group, obs, {
@@ -1644,8 +1732,8 @@
}

livePackets.forEach(lp => VCR.buffer.push({ ts: lp._ts, pkt: lp }));
allGroups.push(livePackets);
}
return livePackets;
});

// Render with real timing gaps between packets
// Sort by earliest timestamp
@@ -1777,7 +1865,7 @@
var pathKey = hops.join(',');
if (seenPathKeys.has(pathKey)) continue;
seenPathKeys.add(pathKey);
var hopPositions = resolveHopPositions(hops, qp);
var hopPositions = resolveHopPositions(hops, qp, window.getResolvedPath ? getResolvedPath(qpkt) : null);
if (hopPositions.length >= 2) {
allPaths.push({ hopPositions: hopPositions, raw: qpkt.raw || first.raw });
} else if (hopPositions.length === 1) {
@@ -1814,15 +1902,29 @@
}
}

function resolveHopPositions(hops, payload) {
// Delegate to shared HopResolver (from hop-resolver.js) instead of reimplementing
const originLat = payload.lat != null && !(payload.lat === 0 && payload.lon === 0) ? payload.lat : null;
const originLon = payload.lon != null && !(payload.lon === 0 && payload.lon === 0) ? payload.lon : null;
function resolveHopPositions(hops, payload, resolvedPath) {
// Prefer server-side resolved_path when available
var resolvedMap;
if (resolvedPath && resolvedPath.length === hops.length && window.HopResolver && HopResolver.ready()) {
resolvedMap = HopResolver.resolveFromServer(hops, resolvedPath);
// Fill in any null entries from client-side fallback, preserving sender GPS context
var nullHops = hops.filter(function(h, i) { return !resolvedPath[i] && !resolvedMap[h]; });
if (nullHops.length) {
const originLat = payload.lat != null && !(payload.lat === 0 && payload.lon === 0) ? payload.lat : null;
const originLon = payload.lon != null && !(payload.lat === 0 && payload.lon === 0) ? payload.lon : null;
var fallback = HopResolver.resolve(nullHops, originLat, originLon, null, null, null);
for (var k in fallback) resolvedMap[k] = fallback[k];
}
} else {
// Delegate to shared HopResolver (from hop-resolver.js) instead of reimplementing
const originLat = payload.lat != null && !(payload.lat === 0 && payload.lon === 0) ? payload.lat : null;
const originLon = payload.lon != null && !(payload.lat === 0 && payload.lon === 0) ? payload.lon : null;

// Use HopResolver if available and initialized, otherwise fall back to simple lookup
const resolvedMap = (window.HopResolver && HopResolver.ready())
? HopResolver.resolve(hops, originLat, originLon, null, null, null)
: {};
// Use HopResolver if available and initialized, otherwise fall back to simple lookup
resolvedMap = (window.HopResolver && HopResolver.ready())
? HopResolver.resolve(hops, originLat, originLon, null, null, null)
: {};
}

// Convert HopResolver's map format to the array format live.js expects: {key, pos, name, known}
const raw = hops.map(hop => {
@@ -2393,6 +2495,15 @@
if (heatLayer) { map.removeLayer(heatLayer); heatLayer = null; }
}

/** Extract channel row style from a packet (shared by feed item builders). */
function _getChannelStyle(pkt) {
if (!window.ChannelColors) return '';
var d = pkt.decoded || {};
var h = d.header || {};
var p = d.payload || {};
return window.ChannelColors.getRowStyle(h.payloadTypeName || '', p.channelName || null);
}

function addFeedItemDOM(icon, typeName, payload, hops, color, pkt, feed) {
const text = payload.text || payload.name || '';
const preview = text ? ' ' + (text.length > 35 ? text.slice(0, 35) + '…' : text) : '';
@@ -2403,10 +2514,13 @@
item.setAttribute('tabindex', '0');
item.setAttribute('role', 'button');
item.style.cursor = 'pointer';
// Channel color highlighting for GRP_TXT packets (#271)
var _cs = _getChannelStyle(pkt);
if (_cs) item.style.cssText += _cs;
item.innerHTML = `
<span class="feed-icon" style="color:${color}">${icon}</span>
<span class="feed-type" style="color:${color}">${typeName}</span>
${hopStr}${obsBadge}
${transportBadge(pkt.route_type)}${hopStr}${obsBadge}
<span class="feed-text">${escapeHtml(preview)}</span>
<span class="feed-time">${formatLiveTimestampHtml(pkt._ts || Date.now())}</span>
`;
@@ -2471,10 +2585,13 @@
item.setAttribute('role', 'button');
if (hash) item.setAttribute('data-hash', hash);
item.style.cursor = 'pointer';
// Channel color highlighting for GRP_TXT packets (#271)
var _chanStyle = _getChannelStyle(pkt);
if (_chanStyle) item.style.cssText += _chanStyle;
item.innerHTML = `
<span class="feed-icon" style="color:${color}">${icon}</span>
<span class="feed-type" style="color:${color}">${typeName}</span>
${hopStr}${obsBadge}
${transportBadge(pkt.route_type)}${hopStr}${obsBadge}
<span class="feed-text">${escapeHtml(preview)}</span>
<span class="feed-time">${formatLiveTimestampHtml(pkt._ts || Date.now())}</span>
`;
@@ -2552,6 +2669,7 @@
if (_lcdClockInterval) { clearInterval(_lcdClockInterval); _lcdClockInterval = null; }
if (_rateCounterInterval) { clearInterval(_rateCounterInterval); _rateCounterInterval = null; }
if (_pruneInterval) { clearInterval(_pruneInterval); _pruneInterval = null; }
if (_affinityInterval) { clearInterval(_affinityInterval); _affinityInterval = null; }
if (ws) { ws.onclose = null; ws.close(); ws = null; }
if (map) { map.remove(); map = null; }
if (_onResize) {
@@ -2584,7 +2702,7 @@
packetCount = 0; activeAnims = 0;
nodeActivity = {}; pktTimestamps = [];
feedDedup.clear();
VCR.buffer = []; VCR.playhead = -1; VCR.mode = 'LIVE'; VCR.missedCount = 0; VCR.speed = 1;
VCR.buffer = []; VCR.playhead = -1; VCR.mode = 'LIVE'; VCR.missedCount = 0; VCR.speed = 1; VCR.replayGen = 0;
}

let _themeRefreshHandler = null;

+100
-14
@@ -9,7 +9,7 @@
let nodes = [];
let targetNodeKey = null;
let observers = [];
let filters = { repeater: true, companion: true, room: true, sensor: true, observer: true, lastHeard: '30d', neighbors: false, clusters: false, hashLabels: localStorage.getItem('meshcore-map-hash-labels') !== 'false', statusFilter: localStorage.getItem('meshcore-map-status-filter') || 'all' };
let filters = { repeater: true, companion: true, room: true, sensor: true, observer: true, lastHeard: '30d', neighbors: false, clusters: false, hashLabels: localStorage.getItem('meshcore-map-hash-labels') !== 'false', statusFilter: localStorage.getItem('meshcore-map-status-filter') || 'all', byteSize: localStorage.getItem('meshcore-map-byte-filter') || 'all' };
let selectedReferenceNode = null; // pubkey of the reference node for neighbor filtering
let neighborPubkeys = null; // Set of pubkeys that are direct neighbors of selected node
let wsHandler = null;
@@ -94,6 +94,15 @@
<legend class="mc-label">Node Types</legend>
<div id="mcRoleChecks"></div>
</fieldset>
<fieldset class="mc-section">
<legend class="mc-label">Byte Size</legend>
<div class="filter-group" id="mcByteFilter">
<button class="btn ${filters.byteSize==='all'?'active':''}" data-byte="all">All</button>
<button class="btn ${filters.byteSize==='1'?'active':''}" data-byte="1">1-byte</button>
<button class="btn ${filters.byteSize==='2'?'active':''}" data-byte="2">2-byte</button>
<button class="btn ${filters.byteSize==='3'?'active':''}" data-byte="3">3-byte</button>
</div>
</fieldset>
<fieldset class="mc-section">
<legend class="mc-label">Display</legend>
<label for="mcClusters"><input type="checkbox" id="mcClusters"> Show clusters</label>
@@ -181,11 +190,17 @@
});

map.on('zoomend', () => {
if (!_renderingMarkers) renderMarkers();
clearTimeout(_zoomResizeTimer);
_zoomResizeTimer = setTimeout(() => {
if (!_renderingMarkers) _repositionMarkers();
}, 150);
});

map.on('resize', () => {
if (!_renderingMarkers) renderMarkers();
clearTimeout(_zoomResizeTimer);
_zoomResizeTimer = setTimeout(() => {
if (!_renderingMarkers) _repositionMarkers();
}, 150);
});

markerLayer = L.layerGroup().addTo(map);
@@ -262,6 +277,16 @@
});
});

// Byte size filter buttons
document.querySelectorAll('#mcByteFilter .btn').forEach(btn => {
btn.addEventListener('click', () => {
filters.byteSize = btn.dataset.byte;
localStorage.setItem('meshcore-map-byte-filter', filters.byteSize);
document.querySelectorAll('#mcByteFilter .btn').forEach(b => b.classList.toggle('active', b.dataset.byte === filters.byteSize));
renderMarkers();
});
});

// Geo filter overlay
(async function () {
try {
@@ -612,6 +637,8 @@

var _renderingMarkers = false;
var _lastDeconflictZoom = null;
var _currentMarkerData = []; // stored marker data for zoom-only repositioning
var _zoomResizeTimer = null;

function deconflictLabels(markers, mapRef) {
const placed = [];
@@ -662,6 +689,62 @@
}
}

/**
 * Create, update, or remove the offset indicator (dashed line + dot at true GPS position)
 * for a deconflicted marker. Shared by _renderMarkersInner and _repositionMarkers.
 * @param {Object} m - marker data object with latLng, adjustedLatLng, offset, _leafletLine, _leafletDot
 * @param {L.LayerGroup} layer - layer group to add/remove indicators from
 */
function _updateOffsetIndicator(m, layer) {
var pos = m.adjustedLatLng || m.latLng;
var redColor = getComputedStyle(document.documentElement).getPropertyValue('--status-red').trim() || '#ef4444';

if (m.offset > 10) {
// Line from true position to adjusted position
if (m._leafletLine) {
m._leafletLine.setLatLngs([m.latLng, pos]);
} else {
m._leafletLine = L.polyline([m.latLng, pos], {
color: redColor, weight: 2, dashArray: '6,4', opacity: 0.85
});
layer.addLayer(m._leafletLine);
}
// Dot at true GPS position
if (!m._leafletDot) {
m._leafletDot = L.circleMarker(m.latLng, {
radius: 3, fillColor: redColor, fillOpacity: 0.9, stroke: true, color: '#fff', weight: 1
});
layer.addLayer(m._leafletDot);
}
} else {
// No offset — remove indicator if it existed
if (m._leafletLine) { layer.removeLayer(m._leafletLine); m._leafletLine = null; }
if (m._leafletDot) { layer.removeLayer(m._leafletDot); m._leafletDot = null; }
}
}

/**
 * Reposition existing markers by re-running deconfliction at the current zoom.
 * Avoids clearing and rebuilding all markers — eliminates flicker on zoom/resize.
 */
function _repositionMarkers() {
if (!map || _currentMarkerData.length === 0) return;
map.invalidateSize({ animate: false });

// Re-run deconfliction with current zoom pixel coordinates
deconflictLabels(_currentMarkerData, map);

for (var i = 0; i < _currentMarkerData.length; i++) {
var m = _currentMarkerData[i];
var pos = m.adjustedLatLng || m.latLng;

// Update marker position
if (m._leafletMarker) m._leafletMarker.setLatLng(pos);

_updateOffsetIndicator(m, markerLayer);
}
}

function renderMarkers() {
if (_renderingMarkers) return;
_renderingMarkers = true;
@@ -670,10 +753,16 @@

function _renderMarkersInner() {
markerLayer.clearLayers();
_currentMarkerData = [];

const filtered = nodes.filter(n => {
if (!n.lat || !n.lon) return false;
if (!filters[n.role || 'companion']) return false;
// Byte size filter (applies only to repeaters)
if (filters.byteSize !== 'all' && (n.role || 'companion') === 'repeater') {
const hs = n.hash_size || 1;
if (String(hs) !== filters.byteSize) return false;
}
// Status filter
if (filters.statusFilter !== 'all') {
const role = (n.role || 'companion').toLowerCase();
@@ -719,24 +808,20 @@
deconflictLabels(allMarkers, map);
}

// Store marker data for zoom/resize repositioning (avoids full rebuild)
_currentMarkerData = allMarkers;

for (const m of allMarkers) {
const pos = m.adjustedLatLng || m.latLng;
const marker = L.marker(pos, { icon: m.icon, alt: m.alt });
marker._nodeKey = m.node.public_key || m.node.id || null;
marker.bindPopup(m.popupFn(), { maxWidth: 280 });
markerLayer.addLayer(marker);
m._leafletMarker = marker;
m._leafletLine = null;
m._leafletDot = null;

if (m.offset > 10) {
const line = L.polyline([m.latLng, pos], {
color: getComputedStyle(document.documentElement).getPropertyValue('--status-red').trim() || '#ef4444', weight: 2, dashArray: '6,4', opacity: 0.85
});
markerLayer.addLayer(line);
// Small dot at true GPS position
const dot = L.circleMarker(m.latLng, {
radius: 3, fillColor: getComputedStyle(document.documentElement).getPropertyValue('--status-red').trim() || '#ef4444', fillOpacity: 0.9, stroke: true, color: '#fff', weight: 1
});
markerLayer.addLayer(dot);
}
_updateOffsetIndicator(m, markerLayer);
}
}

@@ -870,6 +955,7 @@
map = null;
}
markerLayer = null;
_currentMarkerData = [];
routeLayer = null;
if (heatLayer) { heatLayer = null; }
geoFilterLayer = null;

@@ -51,7 +51,7 @@
const nodeName = escapeHtml(n.name || n.public_key.slice(0, 12));

container.innerHTML = `
<div style="max-width:1000px;margin:0 auto;padding:12px 16px;height:100%;overflow-y:auto">
<div style="max-width:1000px;margin:0 auto;padding:12px 16px">
<div style="margin-bottom:12px">
<a href="#/nodes/${encodeURIComponent(n.public_key)}" style="color:var(--accent);text-decoration:none;font-size:12px">← Back to ${nodeName}</a>
<h2 style="margin:4px 0 2px;font-size:18px">📊 ${nodeName} — Analytics</h2>

+17
-9
@@ -372,13 +372,25 @@
}, 5000);
}

/**
 * Fetch node detail + health data in parallel.
 * Both selectNode() and loadFullNode() need the same data —
 * this shared helper avoids duplicating the fetch logic (fixes #391).
 */
async function fetchNodeDetail(pubkey) {
const [nodeData, healthData] = await Promise.all([
api('/nodes/' + encodeURIComponent(pubkey), { ttl: CLIENT_TTL.nodeDetail }),
api('/nodes/' + encodeURIComponent(pubkey) + '/health', { ttl: CLIENT_TTL.nodeDetail }).catch(() => null)
]);
nodeData.healthData = healthData;
return nodeData;
}

async function loadFullNode(pubkey) {
const body = document.getElementById('nodeFullBody');
try {
const [nodeData, healthData] = await Promise.all([
api('/nodes/' + encodeURIComponent(pubkey), { ttl: CLIENT_TTL.nodeDetail }),
api('/nodes/' + encodeURIComponent(pubkey) + '/health', { ttl: CLIENT_TTL.nodeDetail }).catch(() => null)
]);
const nodeData = await fetchNodeDetail(pubkey);
const healthData = nodeData.healthData;
const n = nodeData.node;
const adverts = (nodeData.recentAdverts || []).sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp));
const title = document.querySelector('.node-full-title');
@@ -963,11 +975,7 @@
panel.innerHTML = '<div class="text-center text-muted" style="padding:40px">Loading…</div>';

try {
const [data, healthData] = await Promise.all([
api('/nodes/' + encodeURIComponent(pubkey), { ttl: CLIENT_TTL.nodeDetail }),
api('/nodes/' + encodeURIComponent(pubkey) + '/health', { ttl: CLIENT_TTL.nodeDetail }).catch(() => null)
]);
data.healthData = healthData;
const data = await fetchNodeDetail(pubkey);
renderDetail(panel, data);
} catch (e) {
panel.innerHTML = `<div class="text-muted">Error: ${e.message}</div>`;

@@ -37,7 +37,7 @@
}

app.innerHTML = `
<div class="observer-detail-page" style="overflow-y:auto;height:calc(100vh - 56px);padding:16px">
<div class="observer-detail-page" style="padding:16px">
<div class="page-header" style="display:flex;align-items:center;gap:12px;margin-bottom:16px">
<a href="#/observers" class="btn-icon" title="Back to Observers" aria-label="Back">←</a>
<h2 style="margin:0" id="obsTitle">Observer Detail</h2>

Binary file not shown.
Before: Size 1.1 MiB → After: Size 229 KiB
@@ -10,7 +10,7 @@
|
||||
*/
|
||||
|
||||
window.getParsedPath = function getParsedPath(p) {
|
||||
if (p._parsedPath !== undefined) return p._parsedPath;
|
||||
if (p._parsedPath !== undefined) return p._parsedPath || [];
|
||||
var raw = p.path_json;
|
||||
if (typeof raw !== 'string') {
|
||||
p._parsedPath = Array.isArray(raw) ? raw : [];
|
||||
@@ -28,11 +28,29 @@ window.getParsedPath = function getParsedPath(p) {
|
||||
window.clearParsedCache = function clearParsedCache(p) {
|
||||
delete p._parsedPath;
|
||||
delete p._parsedDecoded;
|
||||
delete p._parsedResolvedPath;
|
||||
return p;
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse resolved_path (server-side resolved full pubkeys).
|
||||
* Returns array of pubkey strings (or null entries) if present, or null if absent.
|
||||
* Cached as _parsedResolvedPath on the packet object.
|
||||
*/
|
||||
window.getResolvedPath = function getResolvedPath(p) {
|
||||
if (p._parsedResolvedPath !== undefined) return p._parsedResolvedPath;
|
||||
var raw = p.resolved_path;
|
||||
if (!raw) { p._parsedResolvedPath = null; return null; }
|
||||
if (typeof raw !== 'string') {
|
||||
p._parsedResolvedPath = Array.isArray(raw) ? raw : null;
|
||||
return p._parsedResolvedPath;
|
||||
}
|
||||
try { p._parsedResolvedPath = JSON.parse(raw) || null; } catch (e) { p._parsedResolvedPath = null; }
|
||||
return p._parsedResolvedPath;
|
||||
};
|
||||
|
||||
window.getParsedDecoded = function getParsedDecoded(p) {
|
||||
if (p._parsedDecoded !== undefined) return p._parsedDecoded;
|
||||
if (p._parsedDecoded !== undefined) return p._parsedDecoded || {};
|
||||
var raw = p.decoded_json;
|
||||
if (typeof raw !== 'string') {
|
||||
p._parsedDecoded = (raw && typeof raw === 'object') ? raw : {};
|
||||
|
||||
+235
-79
@@ -40,6 +40,21 @@
|
||||
clearTimeout(_renderTimer);
|
||||
_renderTimer = setTimeout(() => renderTableRows(), 200);
|
||||
}
|
||||
|
||||
// Coalesce WS-triggered renders into one per animation frame (#396).
|
||||
// Multiple WS batches arriving within the same frame only trigger a single
|
||||
// renderTableRows() call on the next rAF, preventing rapid full rebuilds.
|
||||
function scheduleWSRender() {
|
||||
_wsRenderDirty = true;
|
||||
if (_wsRafId) return; // already scheduled
|
||||
_wsRafId = requestAnimationFrame(function () {
|
||||
_wsRafId = null;
|
||||
if (_wsRenderDirty) {
|
||||
_wsRenderDirty = false;
|
||||
renderTableRows();
|
||||
}
|
||||
});
|
||||
}
|
||||
const PANEL_WIDTH_KEY = 'meshcore-panel-width';
|
||||
const PANEL_CLOSE_HTML = '<button class="panel-close-btn" title="Close detail pane (Esc)">✕</button>';
|
||||
|
||||
@@ -53,11 +68,14 @@
|
||||
let _displayPackets = []; // filtered packets for current view
|
||||
let _displayGrouped = false; // whether _displayPackets is in grouped mode
|
||||
let _rowCounts = []; // per-entry DOM row counts (1 for flat, 1+children for expanded groups)
|
||||
let _rowCountsDirty = false; // set when _rowCounts may be stale (e.g. WS added children) (#410)
|
||||
let _cumulativeOffsetsCache = null; // cached cumulative offsets, invalidated on _rowCounts change
|
||||
let _lastVisibleStart = -1; // last rendered start index (for dirty checking)
|
||||
let _lastVisibleEnd = -1; // last rendered end index (for dirty checking)
|
||||
let _vsScrollHandler = null; // scroll listener reference
|
||||
let _wsRenderTimer = null; // debounce timer for WS-triggered renders
|
||||
let _wsRafId = null; // rAF id for coalescing WS-triggered renders (#396)
|
||||
let _wsRenderDirty = false; // dirty flag for rAF render coalescing (#396)
|
||||
let _observerFilterSet = null; // cached Set from filters.observer, hoisted above loops (#427)
|
||||
|
||||
function closeDetailPanel() {
|
||||
@@ -170,6 +188,29 @@
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pre-populate hopNameCache from server-side resolved_path on packets.
|
||||
* Packets with resolved_path skip client-side HopResolver entirely.
|
||||
* Must call ensureHopResolver() first so nodesList is available for name lookup.
|
||||
*/
|
||||
async function cacheResolvedPaths(packets) {
|
||||
if (!packets || !packets.length) return;
|
||||
let needsInit = false;
|
||||
for (const p of packets) {
|
||||
const rp = getResolvedPath(p);
|
||||
if (rp) { needsInit = true; break; }
|
||||
}
|
||||
if (!needsInit) return;
|
||||
await ensureHopResolver();
|
||||
for (const p of packets) {
|
||||
const rp = getResolvedPath(p);
|
||||
if (!rp) continue;
|
||||
const hops = getParsedPath(p);
|
||||
const resolved = HopResolver.resolveFromServer(hops, rp);
|
||||
Object.assign(hopNameCache, resolved);
|
||||
}
|
||||
}
|
||||
|
||||
function renderHop(h, observerId) {
|
||||
// Use per-packet cache key if observer context available (ambiguous hops differ by region)
|
||||
const cacheKey = observerId ? h + ':' + observerId : h;
|
||||
@@ -268,7 +309,7 @@
|
||||
const obs = data.observations.find(o => String(o.id) === String(obsTarget));
|
||||
if (obs) {
|
||||
expandedHashes.add(h);
|
||||
const obsPacket = {...data.packet, observer_id: obs.observer_id, observer_name: obs.observer_name, snr: obs.snr, rssi: obs.rssi, path_json: obs.path_json, timestamp: obs.timestamp, first_seen: obs.timestamp};
|
||||
const obsPacket = {...data.packet, observer_id: obs.observer_id, observer_name: obs.observer_name, snr: obs.snr, rssi: obs.rssi, path_json: obs.path_json, resolved_path: obs.resolved_path, timestamp: obs.timestamp, first_seen: obs.timestamp};
|
||||
clearParsedCache(obsPacket);
|
||||
selectPacket(obs.id, h, {packet: obsPacket, breakdown: data.breakdown, observations: data.observations}, obs.id);
|
||||
} else {
|
||||
@@ -357,7 +398,7 @@
|
||||
if (pktTime && pktTime < cutoff) return false;
|
||||
}
|
||||
if (filters.type) { const types = filters.type.split(',').map(Number); if (!types.includes(p.payload_type)) return false; }
|
||||
if (filters.observer) { const obsSet = new Set(filters.observer.split(',')); if (!obsSet.has(p.observer_id)) return false; }
|
||||
if (filters.observer) { const obsSet = new Set(filters.observer.split(',')); if (!obsSet.has(p.observer_id) && !(p._children && p._children.some(c => obsSet.has(String(c.observer_id))))) return false; }
|
||||
if (filters.hash && p.hash !== filters.hash) return false;
|
||||
if (RegionFilter.getRegionParam()) {
|
||||
const selectedRegions = RegionFilter.getRegionParam().split(',');
|
||||
@@ -370,9 +411,16 @@
|
||||
if (!filtered.length) return;
|
||||
|
||||
// Resolve any new hops, then update and re-render
|
||||
// Pre-populate from server-side resolved_path, then fall back for remaining
|
||||
const newHops = new Set();
|
||||
for (const p of filtered) {
|
||||
try { getParsedPath(p).forEach(h => { if (!(h in hopNameCache)) newHops.add(h); }); } catch {}
|
||||
const rp = getResolvedPath(p);
|
||||
const hops = getParsedPath(p);
|
||||
if (rp && rp.length === hops.length && window.HopResolver && HopResolver.ready()) {
|
||||
const resolved = HopResolver.resolveFromServer(hops, rp);
|
||||
Object.assign(hopNameCache, resolved);
|
||||
}
|
||||
try { hops.forEach(h => { if (!(h in hopNameCache)) newHops.add(h); }); } catch {}
|
||||
}
|
||||
(newHops.size ? resolveHops([...newHops]) : Promise.resolve()).then(() => {
|
||||
if (groupByHash) {
|
||||
@@ -396,6 +444,9 @@
|
||||
existing._children.unshift(p);
|
||||
if (existing._children.length > 200) existing._children.length = 200;
|
||||
sortGroupChildren(existing);
|
||||
// Invalidate row counts — child count changed, so virtual scroll
|
||||
// heights are stale until next renderTableRows() (#410)
|
||||
_invalidateRowCounts();
|
||||
}
|
||||
} else {
|
||||
// New group
|
||||
@@ -427,9 +478,8 @@
|
||||
if (packets.length > PACKET_LIMIT) packets.length = PACKET_LIMIT;
|
||||
}
|
||||
totalCount += filtered.length;
|
||||
// Debounce WS-triggered renders to avoid rapid full rebuilds
|
||||
clearTimeout(_wsRenderTimer);
|
||||
_wsRenderTimer = setTimeout(function () { renderTableRows(); }, 200);
|
||||
// Coalesce WS-triggered renders via rAF (#396)
|
||||
scheduleWSRender();
|
||||
});
|
||||
});
|
||||
}
@@ -440,8 +490,11 @@
wsHandler = null;
detachVScrollListener();
clearTimeout(_wsRenderTimer);
if (_wsRafId) { cancelAnimationFrame(_wsRafId); _wsRafId = null; }
_wsRenderDirty = false;
_displayPackets = [];
_rowCounts = [];
_rowCountsDirty = false;
_cumulativeOffsetsCache = null;
_observerFilterSet = null;
_lastVisibleStart = -1;
@@ -488,7 +541,12 @@
if (regionParam) params.set('region', regionParam);
if (filters.hash) params.set('hash', filters.hash);
if (filters.node) params.set('node', filters.node);
params.set('groupByHash', 'true'); // always fetch grouped
if (filters.observer) params.set('observer', filters.observer);
if (groupByHash) {
params.set('groupByHash', 'true');
} else {
params.set('expand', 'observations');
}

const data = await api('/packets?' + params.toString());
packets = data.packets || [];
@@ -496,20 +554,14 @@
for (const p of packets) { if (p.hash) hashIndex.set(p.hash, p); }
totalCount = data.total || packets.length;

// When ungrouped, fetch observations for all multi-obs packets and flatten
// When ungrouped, flatten observations inline (single API call, no N+1)
if (!groupByHash) {
const multiObs = packets.filter(p => (p.observation_count || p.count || 1) > 1);
await Promise.all(multiObs.map(async (p) => {
try {
const d = await api(`/packets/${p.hash}`);
if (d?.observations) p._children = d.observations.map(o => clearParsedCache({...d.packet, ...o, _isObservation: true}));
} catch {}
}));
// Flatten: replace grouped packets with individual observations
const flat = [];
for (const p of packets) {
if (p._children && p._children.length > 1) {
for (const c of p._children) flat.push(c);
if (p.observations && p.observations.length > 1) {
for (const o of p.observations) {
flat.push(clearParsedCache({...p, ...o, _isObservation: true, observations: undefined}));
}
} else {
flat.push(p);
}
@@ -518,7 +570,10 @@
totalCount = flat.length;
}

// Pre-resolve all path hops to node names
// Pre-resolve from server-side resolved_path (preferred, no client-side disambiguation needed)
await cacheResolvedPaths(packets);

// Pre-resolve all path hops to node names (fallback for packets without resolved_path)
const allHops = new Set();
for (const p of packets) {
try { getParsedPath(p).forEach(h => allHops.add(h)); } catch {}
@@ -541,19 +596,22 @@
// Ambiguous hops are already resolved by HopResolver client-side
// No need for per-observer server API calls

// Restore expanded group children
// Restore expanded group children (parallel fetch, Map lookup)
if (groupByHash && expandedHashes.size > 0) {
for (const hash of expandedHashes) {
const group = packets.find(p => p.hash === hash);
if (group) {
try {
const childData = await api(`/packets?hash=${hash}&limit=20`);
group._children = childData.packets || [];
sortGroupChildren(group);
} catch {}
} else {
// Group no longer in results — remove from expanded
const expandedArr = [...expandedHashes];
const results = await Promise.all(expandedArr.map(hash => {
const group = hashIndex.get(hash);
if (!group) return { hash, group: null, data: null };
return api(`/packets?hash=${hash}&limit=20`)
.then(data => ({ hash, group, data }))
.catch(() => ({ hash, group, data: null }));
}));
for (const { hash, group, data } of results) {
if (!group) {
expandedHashes.delete(hash);
} else if (data) {
group._children = data.packets || [];
sortGroupChildren(group);
}
}
}
@@ -562,7 +620,7 @@
} catch (e) {
console.error('Failed to load packets:', e);
const tbody = document.getElementById('pktBody');
if (tbody) tbody.innerHTML = '<tr><td colspan="10" class="text-center" style="padding:24px;color:var(--error,#ef4444)"><div role="alert" aria-live="polite">Failed to load packets. Please try again.</div></td></tr>';
if (tbody) tbody.innerHTML = '<tr><td colspan="' + _getColCount() + '" class="text-center" style="padding:24px;color:var(--error,#ef4444)"><div role="alert" aria-live="polite">Failed to load packets. Please try again.</div></td></tr>';
}
}

@@ -831,18 +889,30 @@
obsSortSel.addEventListener('change', async function () {
obsSortMode = this.value;
localStorage.setItem('meshcore-obs-sort', obsSortMode);
// For non-observer sorts, fetch children for visible groups that don't have them yet
// For non-observer sorts, batch-fetch children for visible groups that don't have them yet
if (obsSortMode !== SORT_OBSERVER && groupByHash) {
const toFetch = packets.filter(p => p.hash && !p._children && (p.observation_count || 0) > 1);
await Promise.all(toFetch.map(async (p) => {
if (toFetch.length > 0) {
const hashes = toFetch.map(p => p.hash);
try {
const data = await api(`/packets/${p.hash}`);
if (data?.packet && data.observations) {
p._children = data.observations.map(o => clearParsedCache({...data.packet, ...o, _isObservation: true}));
p._fetchedData = data;
const resp = await fetch('/api/packets/observations', {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify({hashes})
});
if (resp.ok) {
const data = await resp.json();
const results = data.results || {};
for (const p of toFetch) {
const obs = results[p.hash];
if (obs && obs.length) {
p._children = obs.map(o => clearParsedCache({...p, ...o, _isObservation: true}));
p._fetchedData = {packet: p, observations: obs};
}
}
}
} catch {}
}));
}
}
// Re-sort all groups with children
for (const p of packets) {
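
The hunk above replaces N+1 per-packet GETs with a single POST to /api/packets/observations, sending {hashes} and expecting {results: {hash: [observation, ...]}}. The server half of that contract is not part of this diff; a sketch assuming an Express-style router and a db.getObservationsByHash helper (both names are assumptions):

// Sketch only: batched observation lookup matching the client contract above.
app.post('/api/packets/observations', async (req, res) => {
  const hashes = Array.isArray(req.body.hashes) ? req.body.hashes.slice(0, 500) : [];
  const results = {};
  for (const hash of hashes) {
    results[hash] = await db.getObservationsByHash(hash);  // indexed lookup per hash
  }
  res.json({ results });
});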
@@ -1006,11 +1076,11 @@
}
else if (action === 'select-observation') {
const parentHash = row.dataset.parentHash;
const group = packets.find(p => p.hash === parentHash);
const group = hashIndex.get(parentHash);
const child = group?._children?.find(c => String(c.id) === String(value));
if (child) {
const parentData = group._fetchedData;
const obsPacket = parentData ? {...parentData.packet, observer_id: child.observer_id, observer_name: child.observer_name, snr: child.snr, rssi: child.rssi, path_json: child.path_json, timestamp: child.timestamp, first_seen: child.timestamp} : child;
const obsPacket = parentData ? {...parentData.packet, observer_id: child.observer_id, observer_name: child.observer_name, snr: child.snr, rssi: child.rssi, path_json: child.path_json, resolved_path: child.resolved_path, timestamp: child.timestamp, first_seen: child.timestamp} : child;
if (parentData) { clearParsedCache(obsPacket); }
selectPacket(child.id, parentHash, {packet: obsPacket, breakdown: parentData?.breakdown, observations: parentData?.observations}, child.id);
}
@@ -1034,7 +1104,7 @@
}

// Build HTML for a single grouped packet row
function buildGroupRowHtml(p) {
function buildGroupRowHtml(p, entryIdx = -1) {
const isExpanded = expandedHashes.has(p.hash);
let headerObserverId = p.observer_id;
let headerPathJson = p.path_json;
@@ -1054,7 +1124,10 @@
const groupSize = p.raw_hex ? Math.floor(p.raw_hex.length / 2) : 0;
const groupHashBytes = ((parseInt(p.raw_hex?.slice(2, 4), 16) || 0) >> 6) + 1;
const isSingle = p.count <= 1;
let html = `<tr class="${isSingle ? '' : 'group-header'} ${isExpanded ? 'expanded' : ''}" data-hash="${p.hash}" data-action="${isSingle ? 'select-hash' : 'toggle-select'}" data-value="${p.hash}" tabindex="0" role="row">
// Channel color highlighting (#271)
const _grpDecoded = getParsedDecoded(p) || {};
const _grpChanStyle = window.ChannelColors ? window.ChannelColors.getRowStyle(_grpDecoded.type || groupTypeName, _grpDecoded.channel) : '';
let html = `<tr class="${isSingle ? '' : 'group-header'} ${isExpanded ? 'expanded' : ''}" data-hash="${p.hash}" data-action="${isSingle ? 'select-hash' : 'toggle-select'}" data-value="${p.hash}" data-entry-idx="${entryIdx}" tabindex="0" role="row"${_grpChanStyle ? ' style="' + _grpChanStyle + '"' : ''}>
<td style="width:28px;text-align:center;cursor:pointer">${isSingle ? '' : (isExpanded ? '▼' : '▶')}</td>
<td class="col-region">${groupRegion ? `<span class="badge-region">${groupRegion}</span>` : '—'}</td>
<td class="col-time">${renderTimestampCell(p.latest)}</td>
@@ -1080,7 +1153,7 @@
const childRegion = c.observer_id ? (observerMap.get(c.observer_id)?.iata || '') : '';
const childPath = getParsedPath(c);
const childPathStr = renderPath(childPath, c.observer_id);
html += `<tr class="group-child" data-id="${c.id}" data-hash="${c.hash || ''}" data-action="select-observation" data-value="${c.id}" data-parent-hash="${p.hash}" tabindex="0" role="row">
html += `<tr class="group-child" data-id="${c.id}" data-hash="${c.hash || ''}" data-action="select-observation" data-value="${c.id}" data-parent-hash="${p.hash}" data-entry-idx="${entryIdx}" tabindex="0" role="row">
<td></td><td class="col-region">${childRegion ? `<span class="badge-region">${childRegion}</span>` : '—'}</td>
<td class="col-time">${renderTimestampCell(c.timestamp)}</td>
<td class="mono col-hash">${truncate(c.hash || '', 8)}</td>
@@ -1098,17 +1171,19 @@
}

// Build HTML for a single flat (ungrouped) packet row
function buildFlatRowHtml(p) {
const decoded = getParsedDecoded(p);
const pathHops = getParsedPath(p);
function buildFlatRowHtml(p, entryIdx = -1) {
const decoded = getParsedDecoded(p) || {};
const pathHops = getParsedPath(p) || [];
const region = p.observer_id ? (observerMap.get(p.observer_id)?.iata || '') : '';
const typeName = payloadTypeName(p.payload_type);
const typeClass = payloadTypeColor(p.payload_type);
// Channel color highlighting (#271)
const _chanStyle = window.ChannelColors ? window.ChannelColors.getRowStyle(decoded.type || typeName, decoded.channel) : '';
const size = p.raw_hex ? Math.floor(p.raw_hex.length / 2) : 0;
const hashBytes = ((parseInt(p.raw_hex?.slice(2, 4), 16) || 0) >> 6) + 1;
const pathStr = renderPath(pathHops, p.observer_id);
const detail = getDetailPreview(decoded);
return `<tr data-id="${p.id}" data-hash="${p.hash || ''}" data-action="select-hash" data-value="${p.hash || p.id}" tabindex="0" role="row" class="${selectedId === p.id ? 'selected' : ''}">
return `<tr data-id="${p.id}" data-hash="${p.hash || ''}" data-action="select-hash" data-value="${p.hash || p.id}" data-entry-idx="${entryIdx}" tabindex="0" role="row" class="${selectedId === p.id ? 'selected' : ''}"${_chanStyle ? ' style="' + _chanStyle + '"' : ''}>
<td></td><td class="col-region">${region ? `<span class="badge-region">${region}</span>` : '—'}</td>
<td class="col-time">${renderTimestampCell(p.timestamp)}</td>
<td class="mono col-hash">${truncate(p.hash || String(p.id), 8)}</td>
@@ -1122,6 +1197,21 @@
</tr>`;
}

// Mark _rowCounts as stale so renderVisibleRows() recomputes them lazily.
// Called when expanded group children change outside renderTableRows() (#410).
function _invalidateRowCounts() {
_rowCountsDirty = true;
_cumulativeOffsetsCache = null;
}

// Recompute _rowCounts from _displayPackets if they've been invalidated.
function _refreshRowCountsIfDirty() {
if (!_rowCountsDirty || !_displayPackets.length) return;
_rowCounts = _displayPackets.map(function(p) { return _getRowCount(p); });
_cumulativeOffsetsCache = null;
_rowCountsDirty = false;
}

// Compute the number of DOM <tr> rows a single entry produces.
// Used by both row counting and renderVisibleRows to avoid divergence (#424).
function _getRowCount(p) {
@@ -1154,12 +1244,16 @@
}

function renderVisibleRows() {
const _rvr_t0 = performance.now();
const tbody = document.getElementById('pktBody');
if (!tbody || !_displayPackets.length) return;

const scrollContainer = document.getElementById('pktLeft');
if (!scrollContainer) return;

// Recompute row counts if they were invalidated (e.g. WS added children) (#410)
_refreshRowCountsIfDirty();

// Compute total DOM rows accounting for expanded groups
const offsets = _cumulativeRowOffsets();
const totalDomRows = offsets[offsets.length - 1];
@@ -1214,7 +1308,13 @@
const endIdx = Math.min(_displayPackets.length, lastEntry + VSCROLL_BUFFER);

// Skip DOM rebuild if visible range hasn't changed
if (startIdx === _lastVisibleStart && endIdx === _lastVisibleEnd) return;
if (startIdx === _lastVisibleStart && endIdx === _lastVisibleEnd) {
if (window.__PERF_LOG_RENDER) console.log('[perf] renderVisibleRows: skip (no change) %.2fms', performance.now() - _rvr_t0);
return;
}

const prevStart = _lastVisibleStart;
const prevEnd = _lastVisibleEnd;
_lastVisibleStart = startIdx;
_lastVisibleEnd = endIdx;

@@ -1225,14 +1325,51 @@
topSpacer.firstChild.style.height = topPad + 'px';
bottomSpacer.firstChild.style.height = bottomPad + 'px';

// LAZY ROW GENERATION: only build HTML for the visible slice (#422)
const builder = _displayGrouped ? buildGroupRowHtml : buildFlatRowHtml;
const visibleSlice = _displayPackets.slice(startIdx, endIdx);
const visibleHtml = visibleSlice.map(p => builder(p)).join('');
tbody.innerHTML = '';
tbody.appendChild(topSpacer);
tbody.insertAdjacentHTML('beforeend', visibleHtml);
tbody.appendChild(bottomSpacer);
const hasOverlap = prevStart !== -1 && startIdx < prevEnd && endIdx > prevStart;

if (!hasOverlap) {
// Full rebuild: initial render or large scroll jump past buffer
const visibleHtml = _displayPackets.slice(startIdx, endIdx)
.map((p, i) => builder(p, startIdx + i)).join('');
tbody.innerHTML = '';
tbody.appendChild(topSpacer);
tbody.insertAdjacentHTML('beforeend', visibleHtml);
tbody.appendChild(bottomSpacer);
if (window.__PERF_LOG_RENDER) console.log('[perf] renderVisibleRows: full rebuild %d entries, %.2fms', endIdx - startIdx, performance.now() - _rvr_t0);
return;
}

// Incremental update: remove rows that scrolled out at the top (positional)
const headRowCount = offsets[Math.min(startIdx, prevEnd)] - offsets[prevStart];
for (let r = 0; r < headRowCount; r++) {
const row = topSpacer.nextElementSibling;
if (row && row !== bottomSpacer) row.remove();
}
// Remove rows that scrolled out at the bottom (positional)
const tailFrom = Math.max(endIdx, prevStart);
const tailRowCount = offsets[prevEnd] - offsets[tailFrom];
for (let r = 0; r < tailRowCount; r++) {
const row = bottomSpacer.previousElementSibling;
if (row && row !== topSpacer) row.remove();
}
// Prepend rows that scrolled into view at the top
if (startIdx < prevStart) {
let html = '';
for (let i = startIdx; i < Math.min(prevStart, endIdx); i++) {
html += builder(_displayPackets[i], i);
}
topSpacer.insertAdjacentHTML('afterend', html);
}
// Append rows that scrolled into view at the bottom
if (endIdx > prevEnd) {
let html = '';
for (let i = Math.max(prevEnd, startIdx); i < endIdx; i++) {
html += builder(_displayPackets[i], i);
}
bottomSpacer.insertAdjacentHTML('beforebegin', html);
}
if (window.__PERF_LOG_RENDER) console.log('[perf] renderVisibleRows: incremental head=%d tail=%d, %.2fms', headRowCount, tailRowCount, performance.now() - _rvr_t0);
}
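
_cumulativeRowOffsets() is called above but defined outside the hunk. The arithmetic in the incremental path (offsets[prevEnd] - offsets[tailFrom], and offsets[offsets.length - 1] as the total) implies a prefix sum over _rowCounts with a leading zero; a sketch along those lines (the caching detail is an assumption):

function _cumulativeRowOffsets() {
  if (_cumulativeOffsetsCache) return _cumulativeOffsetsCache;
  const offsets = new Array(_rowCounts.length + 1);
  offsets[0] = 0;                          // offsets[i] = DOM rows before entry i
  for (let i = 0; i < _rowCounts.length; i++) {
    offsets[i + 1] = offsets[i] + _rowCounts[i];
  }
  _cumulativeOffsetsCache = offsets;
  return offsets;
}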

// Attach/detach scroll listener for virtual scrolling
@@ -1291,7 +1428,11 @@
}
if (filters.observer) {
const obsIds = new Set(filters.observer.split(','));
displayPackets = displayPackets.filter(p => obsIds.has(p.observer_id));
displayPackets = displayPackets.filter(p => {
if (obsIds.has(p.observer_id)) return true;
if (p._children) return p._children.some(c => obsIds.has(String(c.observer_id)));
return false;
});
}

// Packet Filter Language
@@ -1312,6 +1453,7 @@
if (!displayPackets.length) {
_displayPackets = [];
_rowCounts = [];
_rowCountsDirty = false;
_cumulativeOffsetsCache = null;
_observerFilterSet = null;
_lastVisibleStart = -1;
@@ -1331,6 +1473,7 @@
_displayGrouped = groupByHash;
_observerFilterSet = filters.observer ? new Set(filters.observer.split(',')) : null;
_rowCounts = displayPackets.map(p => _getRowCount(p));
_rowCountsDirty = false;
_cumulativeOffsetsCache = null;

attachVScrollListener();
@@ -1436,8 +1579,8 @@
const pkt = data.packet;
const breakdown = data.breakdown || {};
const ranges = breakdown.ranges || [];
const decoded = getParsedDecoded(pkt);
const pathHops = getParsedPath(pkt);
const decoded = getParsedDecoded(pkt) || {};
const pathHops = getParsedPath(pkt) || [];

// Resolve sender GPS — from packet directly, or from known node in DB
let senderLat = decoded.lat != null ? decoded.lat : (decoded.latitude || null);
@@ -1459,11 +1602,18 @@
} catch {}
}

// Re-resolve hops using client-side HopResolver with sender GPS context
// Resolve hops: prefer server-side resolved_path, fall back to client-side HopResolver
if (pathHops.length) {
try {
await ensureHopResolver();
const resolved = HopResolver.resolve(pathHops);
const serverResolved = getResolvedPath(pkt);
let resolved;
if (serverResolved && serverResolved.length === pathHops.length) {
await ensureHopResolver();
resolved = HopResolver.resolveFromServer(pathHops, serverResolved);
} else {
await ensureHopResolver();
resolved = HopResolver.resolve(pathHops);
}
if (resolved) {
for (const [k, v] of Object.entries(resolved)) {
hopNameCache[k] = v;
@@ -1636,26 +1786,29 @@
}

// Wire up view route on map button
const routeBtn = document.getElementById('viewRouteBtn');
const routeBtn = panel.querySelector('#viewRouteBtn');
if (routeBtn && pathHops.length) {
routeBtn.addEventListener('click', async () => {
try {
// Anchor disambiguation from sender's location if known (e.g. ADVERT lat/lon)
const senderLat = decoded.lat || decoded.latitude;
const senderLon = decoded.lon || decoded.longitude;
// Resolve observer position for backward-pass anchor
let obsLat = null, obsLon = null;
const obsId = obsName(pkt.observer_id);
if (obsId && HopResolver.ready()) {
// Try to find observer in nodes list by name — best effort
// Prefer server-side resolved_path if available
const serverResolved = getResolvedPath(pkt);
let resolvedKeys;
if (serverResolved && serverResolved.length === pathHops.length) {
// Use server-resolved pubkeys, fall back to short prefix for null entries
resolvedKeys = pathHops.map((h, i) => serverResolved[i] || h);
} else {
// Fall back to client-side HopResolver
const senderLat = decoded.lat || decoded.latitude;
const senderLon = decoded.lon || decoded.longitude;
let obsLat = null, obsLon = null;
const obsId = obsName(pkt.observer_id);
await ensureHopResolver();
const data = { resolved: HopResolver.resolve(pathHops, senderLat || null, senderLon || null, obsLat, obsLon, pkt.observer_id) };
resolvedKeys = pathHops.map(h => {
const r = data.resolved?.[h];
return r?.pubkey || h;
});
}
await ensureHopResolver();
const data = { resolved: HopResolver.resolve(pathHops, senderLat || null, senderLon || null, obsLat, obsLon, pkt.observer_id) };
// Pass full pubkeys (client-disambiguated) to map, falling back to short prefix
const resolvedKeys = pathHops.map(h => {
const r = data.resolved?.[h];
return r?.pubkey || h;
});
// Build origin info for the sender node
const origin = {};
if (decoded.pubKey) origin.pubkey = decoded.pubKey;
@@ -1979,14 +2132,15 @@
const data = await api(`/packets/${hash}`);
const pkt = data.packet;
if (!pkt) return;
const group = packets.find(p => p.hash === hash);
const group = hashIndex.get(hash);
if (group && data.observations) {
group._children = data.observations.map(o => clearParsedCache({...pkt, ...o, _isObservation: true}));
group._fetchedData = data;
// Sort children based on current sort mode
sortGroupChildren(group);
}
// Resolve any new hops from children
// Resolve hops from children: prefer server-side resolved_path
await cacheResolvedPaths(group?._children || []);
const childHops = new Set();
for (const c of (group?._children || [])) {
try { getParsedPath(c).forEach(h => childHops.add(h)); } catch {}
@@ -2039,6 +2193,8 @@
renderPath,
_getRowCount,
_cumulativeRowOffsets,
_invalidateRowCounts,
_refreshRowCountsIfDirty,
buildGroupRowHtml,
buildFlatRowHtml,
};

+1
-1
@@ -5,7 +5,7 @@
let interval = null;

async function render(app) {
app.innerHTML = '<div id="perfWrapper" style="height:100%;overflow-y:auto;padding:16px 24px;"><h2>⚡ Performance Dashboard</h2><div id="perfContent">Loading...</div></div>';
app.innerHTML = '<div id="perfWrapper" style="padding:16px 24px;"><h2>⚡ Performance Dashboard</h2><div id="perfContent">Loading...</div></div>';
await refresh();
}


+91
-5
@@ -181,7 +181,12 @@ a:focus-visible, button:focus-visible, input:focus-visible, select:focus-visible
}

/* === Layout === */
#app { height: calc(100vh - 52px); height: calc(100dvh - 52px); overflow: hidden; }
/* Default: body-scroll mode — content pushes beyond viewport, iOS status-bar
tap-to-scroll works because <body> is the scroll container. Pages that need
a fixed-height container (maps, virtual-scroll, split-panels) add
.app-fixed via the router so their children can use height:100%. */
#app { min-height: calc(100vh - 52px); min-height: calc(100dvh - 52px); }
#app.app-fixed { height: calc(100vh - 52px); height: calc(100dvh - 52px); min-height: 0; overflow: hidden; }
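
The comment above says pages opt into the fixed-height container by having the router add .app-fixed; that router code is outside this diff. A sketch of the idea (the route list and function name are assumptions):

// Router side (sketch): routes whose children rely on height:100% opt in.
var FIXED_LAYOUT_ROUTES = ['/packets', '/live', '/map'];   // assumption
function applyLayoutMode(path) {
  var fixed = FIXED_LAYOUT_ROUTES.some(function (r) { return path.indexOf(r) === 0; });
  document.getElementById('app').classList.toggle('app-fixed', fixed);
}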

.split-layout {
display: flex; height: 100%; overflow: hidden;
@@ -674,7 +679,7 @@ button.ch-item.selected { background: var(--selected-bg); }
.advert-info { font-size: 12px; line-height: 1.5; }

/* === Traces Page === */
.traces-page { padding: 16px; max-width: var(--trace-max-width, 95vw); margin: 0 auto; overflow-y: auto; height: 100%; }
.traces-page { padding: 16px; max-width: var(--trace-max-width, 95vw); margin: 0 auto; }
.trace-search {
display: flex; gap: 8px; margin-bottom: 20px;
}
@@ -746,7 +751,7 @@ button.ch-item.selected { background: var(--selected-bg); }
::-webkit-scrollbar-thumb:hover { background: var(--text-muted); }

/* === Observers Page === */
.observers-page { padding: 20px; max-width: 1200px; margin: 0 auto; overflow-y: auto; height: calc(100vh - 56px); }
.observers-page { padding: 20px; max-width: 1200px; margin: 0 auto; }
.obs-summary { display: flex; gap: 20px; margin-bottom: 16px; flex-wrap: wrap; }
.obs-stat { display: flex; align-items: center; gap: 6px; font-size: 14px; color: var(--text-muted); }
.health-dot { width: 10px; height: 10px; border-radius: 50%; display: inline-block; flex-shrink: 0; }
@@ -947,7 +952,9 @@ button.ch-item.selected { background: var(--selected-bg); }
.filter-bar { flex-direction: row; flex-wrap: wrap; gap: 4px; }
.filter-toggle-btn { display: inline-flex !important; }
.filter-bar > *:not(.filter-toggle-btn):not(.col-toggle-wrap) { display: none; }
.filter-bar.filters-expanded > * { display: inline-flex; }
/* Must match :not() specificity of the hide rule above, otherwise .filters-expanded loses
the specificity battle and filter children stay hidden (see issue #534). */
.filter-bar.filters-expanded > *:not(.filter-toggle-btn):not(.col-toggle-wrap) { display: inline-flex; }
.filter-bar.filters-expanded > .col-toggle-wrap { display: inline-block; }
.filter-bar.filters-expanded input { width: 100%; }
.filter-bar.filters-expanded select { width: 100%; }
@@ -1136,7 +1143,7 @@ button.ch-item.ch-item-encrypted .ch-badge { filter: grayscale(0.6); }
.node-activity-time { color: var(--text-muted); white-space: nowrap; min-width: 70px; font-size: 12px; }

/* Analytics page */
.analytics-page { padding: 16px 24px; max-width: 1600px; margin: 0 auto; overflow-y: auto; height: 100%; }
.analytics-page { padding: 16px 24px; max-width: 1600px; margin: 0 auto; }
.analytics-header { margin-bottom: 20px; }
.analytics-header h2 { margin: 0 0 4px; }
.analytics-card { background: var(--card-bg); border: 1px solid var(--border); border-radius: 8px; padding: 16px; margin-bottom: 16px; }
@@ -1951,3 +1958,82 @@ tr[data-hops]:hover { background: rgba(59,130,246,0.1); }
#ngCanvas:focus:not(:focus-visible) {
outline: none;
}

/* ===================== RF Health Dashboard ===================== */
.rf-health-container { padding: 0; }
.rf-time-selector {
display: flex; flex-wrap: wrap; gap: 4px; align-items: center;
margin-bottom: 8px; padding: 8px 0;
}
.rf-range-btn {
padding: 4px 10px; border: 1px solid var(--border); border-radius: 4px;
background: var(--bg-secondary, var(--card-bg, #1e1e1e)); color: var(--text-primary, #e0e0e0);
cursor: pointer; font-size: 12px; transition: background 0.15s;
}
.rf-range-btn:hover { background: var(--bg-hover, #333); }
.rf-range-btn.active { background: var(--accent); color: #fff; border-color: var(--accent); }
.rf-custom-inputs { display: inline-flex; gap: 4px; align-items: center; margin-left: 8px; }
.rf-datetime {
padding: 3px 6px; border: 1px solid var(--border); border-radius: 4px;
background: var(--bg-secondary, var(--card-bg)); color: var(--text-primary); font-size: 12px;
}

.rf-health-split {
display: flex; height: calc(100vh - 180px); min-height: 300px; overflow: hidden;
}
.rf-health-grid {
flex: 1; min-width: 0; overflow-y: auto; padding: 0 8px 8px 0;
display: grid; grid-template-columns: repeat(auto-fill, minmax(220px, 1fr));
gap: 8px; align-content: start;
}
.rf-cell {
border: 1px solid var(--border); border-radius: 6px; padding: 8px 10px;
cursor: pointer; transition: border-color 0.15s, background 0.15s;
background: var(--bg-secondary, var(--card-bg, #1e1e1e));
}
.rf-cell:hover { border-color: var(--accent); }
.rf-cell:focus-visible { outline: 2px solid var(--accent); outline-offset: 1px; }
.rf-cell-selected { border-color: var(--accent); background: var(--bg-hover, rgba(96,165,250,0.08)); }

.rf-cell-header { display: flex; justify-content: space-between; align-items: baseline; gap: 6px; margin-bottom: 4px; }
.rf-cell-name { font-weight: 600; font-size: 13px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; max-width: 120px; }
.rf-cell-nf { font-size: 13px; font-variant-numeric: tabular-nums; white-space: nowrap; }
.rf-cell-batt { font-size: 11px; color: var(--text-muted); white-space: nowrap; }
.rf-nf-warning { color: var(--status-yellow, #f59e0b); }
.rf-nf-critical { color: var(--status-red, #ef4444); }

.rf-cell-sparkline { height: 24px; margin: 2px 0; overflow: hidden; }
.rf-cell-stats { display: flex; gap: 8px; font-size: 10px; color: var(--text-muted); }

/* Side panel for observer detail */
.rf-health-detail {
width: 420px; min-width: 280px; max-width: 50vw;
border-left: 1px solid var(--border); background: var(--bg-secondary, var(--card-bg));
overflow-y: auto; padding: 16px; position: relative;
animation: slideInRight 200ms ease-out;
}
.rf-health-detail.rf-panel-empty {
display: flex; align-items: center; justify-content: center;
color: var(--text-muted); font-size: 14px; animation: none;
}
.rf-detail-header { display: flex; justify-content: space-between; align-items: center; margin-bottom: 8px; }
.rf-detail-header h3 { margin: 0; font-size: 16px; }
.rf-detail-close {
background: none; border: none; color: var(--text-muted); cursor: pointer;
font-size: 18px; padding: 2px 6px; border-radius: 4px;
}
.rf-detail-close:hover { background: var(--bg-hover); }
.rf-detail-charts { display: flex; flex-direction: column; gap: 4px; }
.rf-detail-chart { margin: 0; overflow-x: auto; }
.rf-detail-summary { font-size: 12px; color: var(--text-muted); font-variant-numeric: tabular-nums; }

@media (max-width: 640px) {
.rf-health-split { flex-direction: column; height: auto; }
.rf-health-grid { grid-template-columns: 1fr; max-height: 50vh; }
.rf-health-detail {
width: 100% !important; max-width: 100%; min-width: 0;
border-left: none; border-top: 1px solid var(--border);
}
.rf-time-selector { gap: 3px; }
.rf-custom-inputs { margin-left: 0; margin-top: 4px; flex-wrap: wrap; }
}

@@ -0,0 +1,173 @@
/* Unit tests for channel color highlighting (M1) — #271 */
'use strict';
const vm = require('vm');
const fs = require('fs');
const assert = require('assert');

let passed = 0, failed = 0;
function test(name, fn) {
try {
fn();
passed++;
console.log(`  ✅ ${name}`);
} catch (e) {
failed++;
console.log(`  ❌ ${name}: ${e.message}`);
}
}

// Build minimal sandbox with localStorage mock
function makeSandbox() {
const store = {};
const localStorage = {
getItem: function(k) { return store[k] !== undefined ? store[k] : null; },
setItem: function(k, v) { store[k] = String(v); },
removeItem: function(k) { delete store[k]; },
clear: function() { for (var k in store) delete store[k]; }
};
const ctx = {
window: {},
localStorage: localStorage,
console: console,
JSON: JSON,
};
ctx.window.ChannelColors = undefined;
vm.createContext(ctx);
const src = fs.readFileSync(__dirname + '/public/channel-colors.js', 'utf8');
vm.runInContext(src, ctx);
return ctx;
}

console.log('\n🎨 Channel Colors — Storage CRUD');

test('getChannelColor returns null for unassigned channel', function() {
const ctx = makeSandbox();
assert.strictEqual(ctx.window.ChannelColors.get('#test'), null);
});

test('setChannelColor + getChannelColor round-trip', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#sf', '#ef4444');
assert.strictEqual(ctx.window.ChannelColors.get('#sf'), '#ef4444');
});

test('setChannelColor overwrites existing color', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#sf', '#ef4444');
ctx.window.ChannelColors.set('#sf', '#3b82f6');
assert.strictEqual(ctx.window.ChannelColors.get('#sf'), '#3b82f6');
});

test('removeChannelColor removes assignment', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#test', '#ff0000');
ctx.window.ChannelColors.remove('#test');
assert.strictEqual(ctx.window.ChannelColors.get('#test'), null);
});

test('removeChannelColor on non-existent channel is no-op', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.remove('#nonexistent');
assert.deepStrictEqual(ctx.window.ChannelColors.getAll(), {});
});

test('getAllChannelColors returns all assignments', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#a', '#111111');
ctx.window.ChannelColors.set('#b', '#222222');
const all = ctx.window.ChannelColors.getAll();
assert.strictEqual(JSON.stringify(all), JSON.stringify({ '#a': '#111111', '#b': '#222222' }));
});

test('getAllChannelColors returns empty object when none set', function() {
const ctx = makeSandbox();
assert.strictEqual(JSON.stringify(ctx.window.ChannelColors.getAll()), '{}');
});

test('handles corrupt localStorage gracefully', function() {
const ctx = makeSandbox();
ctx.localStorage.setItem('live-channel-colors', 'not-json{{{');
assert.strictEqual(ctx.window.ChannelColors.get('#test'), null);
assert.strictEqual(JSON.stringify(ctx.window.ChannelColors.getAll()), '{}');
});

test('set with null/empty channel is no-op', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('', '#ff0000');
ctx.window.ChannelColors.set(null, '#ff0000');
assert.strictEqual(JSON.stringify(ctx.window.ChannelColors.getAll()), '{}');
});

test('set rejects invalid hex colors', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#ch', 'red');
ctx.window.ChannelColors.set('#ch', '#xyz');
ctx.window.ChannelColors.set('#ch', '#12345');
ctx.window.ChannelColors.set('#ch', '#1234567');
ctx.window.ChannelColors.set('#ch', 'ff0000');
assert.strictEqual(ctx.window.ChannelColors.get('#ch'), null);
});

test('set normalizes 3-digit hex to 6-digit', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#ch', '#abc');
assert.strictEqual(ctx.window.ChannelColors.get('#ch'), '#aabbcc');
});

test('set accepts valid 6-digit hex', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#ch', '#ef4444');
assert.strictEqual(ctx.window.ChannelColors.get('#ch'), '#ef4444');
});

test('get with null/empty channel returns null', function() {
const ctx = makeSandbox();
assert.strictEqual(ctx.window.ChannelColors.get(''), null);
assert.strictEqual(ctx.window.ChannelColors.get(null), null);
});

console.log('\n🎨 Channel Colors — Row Style Generation');

test('getRowStyle returns empty string for non-GRP_TXT types', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#test', '#ff0000');
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('ADVERT', '#test'), '');
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('TXT_MSG', '#test'), '');
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('ACK', '#test'), '');
});

test('getRowStyle returns empty string for unassigned channel', function() {
const ctx = makeSandbox();
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('GRP_TXT', '#unassigned'), '');
});

test('getRowStyle returns empty string for null channel', function() {
const ctx = makeSandbox();
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('GRP_TXT', null), '');
});

test('getRowStyle returns border + background for assigned GRP_TXT channel', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#sf', '#ef4444');
const style = ctx.window.ChannelColors.getRowStyle('GRP_TXT', '#sf');
assert.ok(style.includes('border-left:4px solid #ef4444'), 'should have left border');
assert.ok(style.includes('background:#ef44441a'), 'should have 10% opacity background');
});

test('getRowStyle works with CHAN type (alias for GRP_TXT)', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#mesh', '#3b82f6');
const style = ctx.window.ChannelColors.getRowStyle('CHAN', '#mesh');
assert.ok(style.includes('border-left:4px solid #3b82f6'), 'should have left border');
assert.ok(style.includes('background:#3b82f61a'), 'should have background tint');
});

test('getRowStyle returns empty when channel has no assigned color', function() {
const ctx = makeSandbox();
ctx.window.ChannelColors.set('#other', '#ff0000');
assert.strictEqual(ctx.window.ChannelColors.getRowStyle('GRP_TXT', '#nope'), '');
});

// Summary
console.log(`\n${passed} passed, ${failed} failed\n`);
process.exit(failed ? 1 : 0);
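
public/channel-colors.js is loaded by these tests but not included in this diff. A minimal implementation consistent with the assertions above (the 'live-channel-colors' storage key comes from the corrupt-localStorage test; everything else is an assumption):

(function () {
  'use strict';
  var KEY = 'live-channel-colors';

  function load() {
    try { return JSON.parse(localStorage.getItem(KEY)) || {}; }
    catch (e) { return {}; }               // corrupt JSON reads as empty
  }
  function save(map) { localStorage.setItem(KEY, JSON.stringify(map)); }

  // Accept #rgb or #rrggbb; normalize #rgb to #rrggbb; reject anything else.
  function normalizeHex(c) {
    if (typeof c !== 'string') return null;
    if (/^#[0-9a-f]{6}$/i.test(c)) return c.toLowerCase();
    var m = /^#([0-9a-f])([0-9a-f])([0-9a-f])$/i.exec(c);
    return m ? ('#' + m[1] + m[1] + m[2] + m[2] + m[3] + m[3]).toLowerCase() : null;
  }

  window.ChannelColors = {
    get: function (channel) {
      return channel ? (load()[channel] || null) : null;
    },
    set: function (channel, color) {
      var hex = normalizeHex(color);
      if (!channel || !hex) return;        // no-op on bad channel or bad color
      var map = load(); map[channel] = hex; save(map);
    },
    remove: function (channel) {
      var map = load();
      if (channel in map) { delete map[channel]; save(map); }
    },
    getAll: function () { return load(); },
    // Row tint only applies to channel messages (GRP_TXT, or its CHAN alias).
    getRowStyle: function (type, channel) {
      if (type !== 'GRP_TXT' && type !== 'CHAN') return '';
      var c = this.get(channel);
      return c ? 'border-left:4px solid ' + c + ';background:' + c + '1a' : '';
    }
  };
})();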
@@ -1573,6 +1573,47 @@ async function run() {

// ─── End affinity debug tests ─────────────────────────────────────────────

// ─── Mobile filter dropdown tests (#534) ──────────────────────────────────

await test('Mobile: filter toggle expands filter bar on packets page (#534)', async () => {
// Use a mobile viewport
await page.setViewportSize({ width: 480, height: 800 });
await page.goto(`${BASE}/#/packets`);
await page.waitForTimeout(500);

const filterBar = await page.$('.filter-bar');
assert(filterBar, 'Filter bar should exist on packets page');

// Before clicking toggle, filter inputs should be hidden
const toggleBtn = await page.$('.filter-toggle-btn');
assert(toggleBtn, 'Filter toggle button should exist on mobile');

await toggleBtn.click();
await page.waitForTimeout(300);

// After clicking, .filters-expanded should be on the filter bar
const expanded = await filterBar.evaluate(el => el.classList.contains('filters-expanded'));
assert(expanded, 'Filter bar should have filters-expanded class after toggle');

// Filter inputs should now be visible
const filterInput = await page.$('.filter-bar input');
if (filterInput) {
const display = await filterInput.evaluate(el => getComputedStyle(el).display);
assert(display !== 'none', `Filter input should be visible when expanded, got display: ${display}`);
}

const filterSelect = await page.$('.filter-bar select');
if (filterSelect) {
const display = await filterSelect.evaluate(el => getComputedStyle(el).display);
assert(display !== 'none', `Filter select should be visible when expanded, got display: ${display}`);
}

// Reset viewport
await page.setViewportSize({ width: 1280, height: 720 });
});

// ─── End mobile filter tests ──────────────────────────────────────────────

// Extract frontend coverage if instrumented server is running
try {
const coverage = await page.evaluate(() => window.__coverage__);

+222
-13
@@ -564,6 +564,93 @@ console.log('\n=== hop-resolver.js ===');
});
}

// ===== resolveFromServer (hop-resolver.js, M4 #555) =====
console.log('\n=== resolveFromServer (hop-resolver.js) ===');
{
const ctx = makeSandbox();
ctx.IATA_COORDS_GEO = {};
loadInCtx(ctx, 'public/hop-resolver.js');
const HR = ctx.window.HopResolver;

test('resolveFromServer works without init (uses pubkey prefix as name)', () => {
const pk = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890';
const result = HR.resolveFromServer(['AB'], [pk]);
assert.strictEqual(result['AB'].name, pk.slice(0, 8));
assert.strictEqual(result['AB'].pubkey, pk);
});

test('resolveFromServer with matching node', () => {
const pubkey = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890';
HR.init([{ public_key: pubkey, name: 'NodeA', lat: 37.3, lon: -122.0 }]);
const result = HR.resolveFromServer(['AB'], [pubkey]);
assert.strictEqual(result['AB'].name, 'NodeA');
assert.strictEqual(result['AB'].pubkey, pubkey);
assert.ok(!result['AB'].ambiguous);
});

test('resolveFromServer with null entry skips it', () => {
const pubkey = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890';
HR.init([{ public_key: pubkey, name: 'NodeA', lat: 37.3, lon: -122.0 }]);
const result = HR.resolveFromServer(['AB', 'CD'], [pubkey, null]);
assert.strictEqual(result['AB'].name, 'NodeA');
assert.ok(!('CD' in result)); // null entries are skipped
});

test('resolveFromServer with unknown pubkey uses prefix', () => {
HR.init([{ public_key: 'aaaa0000', name: 'Other' }]);
const unknownPk = '1111111111111111111111111111111111111111111111111111111111111111';
const result = HR.resolveFromServer(['AB'], [unknownPk]);
assert.strictEqual(result['AB'].name, unknownPk.slice(0, 8));
assert.strictEqual(result['AB'].pubkey, unknownPk);
});

test('resolveFromServer mismatched lengths returns empty', () => {
HR.init([{ public_key: 'abcdef1234567890', name: 'NodeA' }]);
const result = HR.resolveFromServer(['AB', 'CD'], ['abcdef1234567890']);
assert.strictEqual(Object.keys(result).length, 0);
});
}
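
The module under test is loaded from public/hop-resolver.js, which is not included in this diff. A minimal resolveFromServer consistent with the tests above (nodesByPubkey, a map populated by init(), is an assumption):

function resolveFromServer(hops, serverPath) {
  const out = {};
  if (!hops || !serverPath || hops.length !== serverPath.length) return out;
  for (let i = 0; i < hops.length; i++) {
    const pk = serverPath[i];
    if (!pk) continue;                     // null = server could not resolve this hop
    const node = nodesByPubkey[pk];        // assumption: filled in by init()
    out[hops[i]] = { name: node ? node.name : pk.slice(0, 8), pubkey: pk };
  }
  return out;
}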

// ===== getResolvedPath (packet-helpers.js, M4 #555) =====
console.log('\n=== getResolvedPath (packet-helpers.js) ===');
{
const ctx = makeSandbox();
loadInCtx(ctx, 'public/packet-helpers.js');
const getResolvedPath = ctx.window.getResolvedPath;

test('getResolvedPath returns null when absent', () => {
assert.strictEqual(getResolvedPath({}), null);
});

test('getResolvedPath parses JSON string', () => {
const pkt = { resolved_path: '["aabb","ccdd",null]' };
const result = getResolvedPath(pkt);
assert.deepStrictEqual(result, ['aabb', 'ccdd', null]);
});

test('getResolvedPath returns array as-is', () => {
const arr = ['aabb', null];
const pkt = { resolved_path: arr };
assert.strictEqual(getResolvedPath(pkt), arr);
});

test('getResolvedPath caches result', () => {
const pkt = { resolved_path: '["aabb"]' };
const r1 = getResolvedPath(pkt);
const r2 = getResolvedPath(pkt);
assert.strictEqual(r1, r2); // same reference
});

test('clearParsedCache clears resolved path cache', () => {
const clearParsedCache = ctx.window.clearParsedCache;
const pkt = { resolved_path: '["aabb"]' };
getResolvedPath(pkt);
assert.ok(pkt._parsedResolvedPath !== undefined);
clearParsedCache(pkt);
assert.strictEqual(pkt._parsedResolvedPath, undefined);
});
}
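
packet-helpers.js is exercised here but not shown in this diff. A getResolvedPath shape consistent with the tests above (the _parsedResolvedPath cache field is taken from the clearParsedCache test; the rest is an assumption):

function getResolvedPath(pkt) {
  if (pkt._parsedResolvedPath !== undefined) return pkt._parsedResolvedPath;  // cached
  const rp = pkt.resolved_path;
  if (rp == null) { pkt._parsedResolvedPath = null; return null; }
  if (Array.isArray(rp)) { pkt._parsedResolvedPath = rp; return rp; }         // as-is
  try { pkt._parsedResolvedPath = JSON.parse(rp); }
  catch (e) { pkt._parsedResolvedPath = null; }
  return pkt._parsedResolvedPath;
}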

// ===== haversineKm exposed from HopResolver (issue #433) =====
console.log('\n=== haversineKm (hop-resolver.js) ===');
{
@@ -998,6 +1085,56 @@ console.log('\n=== live.js: pruneStaleNodes ===');
assert.ok(markers['apiNode'], 'API stale node should NOT be removed');
assert.ok(data['apiNode'], 'API stale node data should NOT be removed');
});

test('pruneStaleNodes cleans up nodeActivity for removed nodes', () => {
const { ctx } = makeLiveSandbox();
const prune = ctx.window._livePruneStaleNodes;
const markers = ctx.window._liveNodeMarkers();
const data = ctx.window._liveNodeData();
const activity = ctx.window._liveNodeActivity();

// WS-only stale node
markers['staleNode'] = { _glowMarker: null };
data['staleNode'] = { public_key: 'staleNode', role: 'companion', _liveSeen: Date.now() - 48 * 3600000 };
activity['staleNode'] = 5;

// Active node
markers['activeNode'] = { setStyle: function() {}, _glowMarker: null };
data['activeNode'] = { public_key: 'activeNode', role: 'companion', _liveSeen: Date.now() };
activity['activeNode'] = 3;

prune();

assert.ok(!markers['staleNode'], 'stale node marker removed');
assert.ok(!data['staleNode'], 'stale node data removed');
assert.ok(!activity['staleNode'], 'stale node activity removed');
assert.ok(markers['activeNode'], 'active node marker preserved');
assert.ok(data['activeNode'], 'active node data preserved');
assert.strictEqual(activity['activeNode'], 3, 'active node activity preserved');
});

test('pruneStaleNodes removes orphaned nodeActivity entries', () => {
const { ctx } = makeLiveSandbox();
const prune = ctx.window._livePruneStaleNodes;
const markers = ctx.window._liveNodeMarkers();
const data = ctx.window._liveNodeData();
const activity = ctx.window._liveNodeActivity();

// Add an active node
markers['existingNode'] = { setStyle: function() {}, _glowMarker: null };
data['existingNode'] = { public_key: 'existingNode', role: 'companion', _liveSeen: Date.now() };
activity['existingNode'] = 2;

// Add orphaned activity (no corresponding nodeData)
activity['ghostNode'] = 10;

prune();

assert.ok(markers['existingNode'], 'existing node preserved');
assert.ok(data['existingNode'], 'existing node data preserved');
assert.strictEqual(activity['existingNode'], 2, 'existing node activity preserved');
assert.ok(!activity['ghostNode'], 'orphaned activity entry removed');
});
}

// ===== live.js: vcrFormatTime respects UTC/local setting =====
@@ -2683,8 +2820,9 @@ console.log('\n=== packets.js: savedTimeWindowMin defaults ===');
assert.ok(!packetsSource.includes('_lastRenderedRows'),
'should NOT have pre-built row HTML cache');
assert.ok(packetsSource.includes('_displayPackets.slice(startIdx, endIdx)'),
'should slice display packets for visible range');
assert.ok(packetsSource.includes('visibleSlice.map(p => builder(p))'),
'should slice display packets for visible range on full rebuild');
// Incremental path uses builder() per-item in loops; full rebuild uses .map()
assert.ok(packetsSource.includes('builder(p, startIdx + i)') || packetsSource.includes('builder(_displayPackets[i], i)'),
'should build HTML lazily per visible packet');
});

@@ -2695,6 +2833,63 @@ console.log('\n=== packets.js: savedTimeWindowMin defaults ===');
'buildGroupRowHtml should use hoisted _observerFilterSet');
});

test('observer filter in grouped mode includes packet when child matches (#537)', () => {
// The display filter should keep a grouped packet whose primary observer_id
// does NOT match, but one of its _children does.
const obsIds = new Set(['OBS_B']);
const packets = [
{ observer_id: 'OBS_A', _children: [{ observer_id: 'OBS_A' }, { observer_id: 'OBS_B' }] },
{ observer_id: 'OBS_C', _children: [{ observer_id: 'OBS_C' }] },
];
const result = packets.filter(p => {
if (obsIds.has(p.observer_id)) return true;
if (p._children) return p._children.some(c => obsIds.has(String(c.observer_id)));
return false;
});
assert.strictEqual(result.length, 1, 'should keep packet with matching child observer');
assert.strictEqual(result[0].observer_id, 'OBS_A');
});

test('observer filter in grouped mode hides packet with no matching observations (#537)', () => {
const obsIds = new Set(['OBS_X']);
const packets = [
{ observer_id: 'OBS_A', _children: [{ observer_id: 'OBS_A' }, { observer_id: 'OBS_B' }] },
];
const result = packets.filter(p => {
if (obsIds.has(p.observer_id)) return true;
if (p._children) return p._children.some(c => obsIds.has(String(c.observer_id)));
return false;
});
assert.strictEqual(result.length, 0, 'should hide packet with no matching observers');
});

test('WS observer filter checks children for grouped packets (#537)', () => {
const filters = { observer: 'OBS_B' };
const obsSet = new Set(filters.observer.split(','));
const p = { observer_id: 'OBS_A', _children: [{ observer_id: 'OBS_B' }] };
const passes = obsSet.has(p.observer_id) || (p._children && p._children.some(c => obsSet.has(String(c.observer_id))));
assert.ok(passes, 'WS filter should pass grouped packet when child matches');

const p2 = { observer_id: 'OBS_C', _children: [{ observer_id: 'OBS_D' }] };
const passes2 = obsSet.has(p2.observer_id) || (p2._children && p2._children.some(c => obsSet.has(String(c.observer_id))));
assert.ok(!passes2, 'WS filter should reject grouped packet with no matching observers');
});

test('packets.js display filter checks _children for observer match (#537)', () => {
// Verify the actual source code has the children check
assert.ok(
packetsSource.includes('p._children) return p._children.some(c => obsIds.has(String(c.observer_id))'),
'display filter should check _children for observer match'
);
});

test('packets.js WS filter checks _children for observer match (#537)', () => {
assert.ok(
packetsSource.includes('p._children && p._children.some(c => obsSet.has(String(c.observer_id)))'),
'WS filter should check _children for observer match'
);
});

test('buildFlatRowHtml has null-safe decoded_json', () => {
const flatBuilderMatch = packetsSource.match(/function buildFlatRowHtml[\s\S]*?(?=\n  function )/);
assert.ok(flatBuilderMatch, 'buildFlatRowHtml should exist');
@@ -2999,20 +3194,24 @@ console.log('\n=== channels.js: formatHashHex (issue #465) ===');
'destroy must reset observerMap to empty Map');
});

test('WS handler debounces render via _wsRenderTimer', () => {
test('WS handler coalesces render via rAF (#396)', () => {
const wsBlock = src.slice(src.indexOf('wsHandler = debouncedOnWS'), src.indexOf('function destroy()'));
assert.ok(wsBlock.includes('_wsRenderTimer'),
'WS handler must debounce renders via _wsRenderTimer');
assert.ok(wsBlock.includes('clearTimeout(_wsRenderTimer)'),
'WS handler must clear pending timer before scheduling new render');
assert.ok(/setTimeout\(function \(\) \{ renderTableRows\(\); \}/.test(wsBlock),
'WS handler must schedule renderTableRows via setTimeout');
assert.ok(wsBlock.includes('scheduleWSRender()'),
'WS handler must coalesce renders via scheduleWSRender()');
// Verify scheduleWSRender uses requestAnimationFrame
const schedFn = src.slice(src.indexOf('function scheduleWSRender()'), src.indexOf('function scheduleWSRender()') + 300);
assert.ok(schedFn.includes('requestAnimationFrame'),
'scheduleWSRender must use requestAnimationFrame for coalescing');
assert.ok(schedFn.includes('_wsRenderDirty'),
'scheduleWSRender must use dirty flag pattern');
});

test('destroy clears _wsRenderTimer', () => {
const destroyBlock = src.slice(src.indexOf('function destroy()'), src.indexOf('function destroy()') + 500);
assert.ok(destroyBlock.includes('clearTimeout(_wsRenderTimer)'),
'destroy must clear _wsRenderTimer to prevent stale renders after navigation');
test('destroy clears rAF and dirty flag (#396)', () => {
const destroyBlock = src.slice(src.indexOf('function destroy()'), src.indexOf('function destroy()') + 600);
assert.ok(destroyBlock.includes('cancelAnimationFrame(_wsRafId)'),
'destroy must cancel pending rAF to prevent stale renders after navigation');
assert.ok(destroyBlock.includes('_wsRenderDirty = false'),
'destroy must reset dirty flag');
});
}
// ===== NODES.JS: shared sandbox factory =====
@@ -4126,7 +4325,17 @@ console.log('\n=== app.js: routeTypeName/payloadTypeName edge cases ===');
assertJsonEqual(getParsedPath(p), []);
});

test('getParsedPath: cached null _parsedPath returns empty array (#538)', () => {
const p = { path_json: '["a"]', _parsedPath: null };
assertJsonEqual(getParsedPath(p), []);
});

// --- getParsedDecoded ---
test('getParsedDecoded: cached null _parsedDecoded returns empty object (#538)', () => {
const p = { decoded_json: '{"x":1}', _parsedDecoded: null };
assertJsonEqual(getParsedDecoded(p), {});
});

test('getParsedDecoded: valid JSON object', () => {
const p = { decoded_json: '{"type":"GRP_TXT","text":"hello"}' };
const result = getParsedDecoded(p);

@@ -0,0 +1,99 @@
/**
 * Unit tests for HopResolver affinity-aware hop resolution.
 */
'use strict';
const fs = require('fs');
const vm = require('vm');

// Load hop-resolver.js in a sandboxed context
const code = fs.readFileSync(__dirname + '/public/hop-resolver.js', 'utf8');
const sandbox = { window: {}, console, Math, Object, Array, Number, Date, Map, Set, parseInt, parseFloat, encodeURIComponent };
vm.createContext(sandbox);
vm.runInContext(code, sandbox);
const HopResolver = sandbox.window.HopResolver;

let passed = 0;
let failed = 0;

function assert(condition, msg) {
if (condition) { passed++; console.log('  ✓ ' + msg); }
else { failed++; console.error('  ✗ ' + msg); }
}

// ── Test nodes ──
// Two nodes share the same 1-byte prefix "ab"
const nodeA = { public_key: 'ab1111', name: 'NodeA', lat: 37.0, lon: -122.0 };
const nodeB = { public_key: 'ab2222', name: 'NodeB', lat: 38.0, lon: -123.0 };
const nodeC = { public_key: 'cd3333', name: 'NodeC', lat: 37.5, lon: -122.5 };

console.log('\n=== HopResolver Affinity Tests ===\n');

// Test 1: Affinity prefers neighbor candidate over geo-closest
console.log('Test 1: Affinity prefers neighbor over geo-closest');
HopResolver.init([nodeA, nodeB, nodeC]);
HopResolver.setAffinity({
edges: [
{ source: 'cd3333', target: 'ab2222', score: 0.8 }
// NodeC is a neighbor of NodeB but NOT NodeA
]
});

// Resolve hop "ab" after NodeC was resolved — should pick NodeB (neighbor) not NodeA (geo-closer)
// Origin at NodeC's position so forward pass runs with NodeC as anchor
const result1 = HopResolver.resolve(['cd33', 'ab'], nodeC.lat, nodeC.lon, null, null, null);
assert(result1['ab'].name === 'NodeB', 'Should pick NodeB (affinity neighbor of NodeC) — got: ' + result1['ab'].name);

// Test 2: Without affinity, falls back to geo-closest
console.log('\nTest 2: Cold start (no affinity) falls back to geo-closest');
HopResolver.init([nodeA, nodeB, nodeC]);
HopResolver.setAffinity({}); // No edges

// With anchor at NodeC's position, NodeA is closer to NodeC than NodeB
const result2 = HopResolver.resolve(['cd33', 'ab'], nodeC.lat, nodeC.lon, null, null, null);
// NodeA (37, -122) is closer to NodeC (37.5, -122.5) than NodeB (38, -123)
assert(result2['ab'].name === 'NodeA', 'Should pick NodeA (geo-closest) — got: ' + result2['ab'].name);

// Test 3: setAffinity with null/undefined doesn't crash
console.log('\nTest 3: setAffinity with null/undefined is safe');
HopResolver.setAffinity(null);
HopResolver.setAffinity(undefined);
HopResolver.setAffinity({});
assert(true, 'No crash on null/undefined/empty affinity');

// Test 4: getAffinity returns correct scores
console.log('\nTest 4: getAffinity returns correct scores');
HopResolver.setAffinity({
edges: [
{ source: 'aaa', target: 'bbb', score: 0.95 },
{ source: 'ccc', target: 'ddd', weight: 5 }
]
});
assert(HopResolver.getAffinity('aaa', 'bbb') === 0.95, 'aaa→bbb = 0.95');
assert(HopResolver.getAffinity('bbb', 'aaa') === 0.95, 'bbb→aaa = 0.95 (bidirectional)');
assert(HopResolver.getAffinity('ccc', 'ddd') === 5, 'ccc→ddd = 5 (weight fallback)');
assert(HopResolver.getAffinity('aaa', 'zzz') === 0, 'unknown pair = 0');
assert(HopResolver.getAffinity(null, 'bbb') === 0, 'null pubkey = 0');

// Test 5: Affinity with multiple neighbors — highest score wins
console.log('\nTest 5: Highest affinity score wins among neighbors');
HopResolver.init([nodeA, nodeB, nodeC]);
HopResolver.setAffinity({
edges: [
{ source: 'cd3333', target: 'ab1111', score: 0.3 },
{ source: 'cd3333', target: 'ab2222', score: 0.9 }
]
});
const result5 = HopResolver.resolve(['cd33', 'ab'], nodeC.lat, nodeC.lon, null, null, null);
assert(result5['ab'].name === 'NodeB', 'Should pick NodeB (highest affinity 0.9) — got: ' + result5['ab'].name);

// Test 6: Unambiguous hops are not affected by affinity
console.log('\nTest 6: Unambiguous hops unaffected by affinity');
const nodeD = { public_key: 'ee4444', name: 'NodeD', lat: 36.0, lon: -121.0 };
HopResolver.init([nodeA, nodeB, nodeC, nodeD]);
HopResolver.setAffinity({ edges: [] });
const result6 = HopResolver.resolve(['ee44'], null, null, null, null, null);
assert(result6['ee44'].name === 'NodeD', 'Unique prefix resolves directly — got: ' + result6['ee44'].name);
assert(!result6['ee44'].ambiguous, 'Should not be marked ambiguous');

console.log('\n' + (passed + failed) + ' tests, ' + passed + ' passed, ' + failed + ' failed\n');
process.exit(failed > 0 ? 1 : 0);
|
||||
@@ -272,6 +272,48 @@ console.log('\n=== live.js: expandToBufferEntries ===');
  });
}

// ===== expandToBufferEntriesAsync (chunked, non-blocking) =====
console.log('\n=== live.js: expandToBufferEntriesAsync ===');
{
  // Build a sandbox with packet-helpers loaded so expandToBufferEntries can call dbPacketToLive
  const ctx = makeSandbox();
  addLiveGlobals(ctx);
  loadInCtx(ctx, 'public/roles.js');
  loadInCtx(ctx, 'public/packet-helpers.js');
  // live.js may throw in a bare sandbox (it can reference DOM globals at load
  // time); if so, salvage whatever it already attached to window.
  try { loadInCtx(ctx, 'public/live.js'); } catch (e) {
    for (const k of Object.keys(ctx.window)) ctx[k] = ctx.window[k];
  }
  const expandSync = ctx.window._liveExpandToBufferEntries;
  const expandAsync = ctx.window._liveExpandToBufferEntriesAsync;
  assert.ok(expandAsync, '_liveExpandToBufferEntriesAsync must be exposed');

  const pkts = [];
  for (let i = 0; i < 500; i++) {
    pkts.push({
      id: i, hash: 'h' + i, timestamp: new Date(1700000000000 + i * 1000).toISOString(),
      decoded_json: '{"type":"GRP_TXT"}', path_json: '[]',
      observations: [
        { timestamp: new Date(1700000000000 + i * 1000 + 100).toISOString(), snr: 5, observer_name: 'O1' },
        { timestamp: new Date(1700000000000 + i * 1000 + 200).toISOString(), snr: 8, observer_name: 'O2' },
      ],
    });
  }

  test('sync expand handles 500 packets (1000 entries) correctly', () => {
    const result = expandSync(pkts);
    assert.strictEqual(result.length, 1000, '500 packets * 2 observations = 1000 entries');
    assert.strictEqual(result[0].pkt.hash, 'h0');
    assert.strictEqual(result[999].pkt.hash, 'h499');
  });

  test('VCR_CHUNK_SIZE is defined and async function yields via setTimeout', () => {
    const src = fs.readFileSync(__dirname + '/public/live.js', 'utf8');
    assert.ok(src.includes('VCR_CHUNK_SIZE'), 'VCR_CHUNK_SIZE constant must exist');
    assert.ok(src.includes('expandToBufferEntriesAsync'), 'async version must exist');
    assert.ok(src.includes('setTimeout(processChunk, 0)'), 'must yield via setTimeout between chunks');
  });
}
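
// The chunked pattern the test above asserts on, as a standalone sketch
// (exampleExpandAsync is hypothetical; the real expandToBufferEntriesAsync
// lives in public/live.js): process the input in fixed-size slices and yield
// to the event loop via setTimeout(fn, 0) between slices, so a large backlog
// never blocks rendering for its full duration.
function exampleExpandAsync(items, expandOne, chunkSize, done) {
  const out = [];
  let i = 0;
  function processChunk() {
    const end = Math.min(i + chunkSize, items.length);
    for (; i < end; i++) out.push.apply(out, expandOne(items[i]));
    if (i < items.length) setTimeout(processChunk, 0); // yield between chunks
    else done(out);
  }
  processChunk();
}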

// ===== SEG_MAP (7-segment display) =====
console.log('\n=== live.js: SEG_MAP ===');
{
@@ -839,6 +881,17 @@ console.log('\n=== live.js: source-level safety checks ===');
    assert.ok(src.includes('const existingIds = new Set(VCR.buffer.map(b => b.pkt.id)'),
      'vcrRewind should dedup by packet ID');
  });

  test('feed items include transport badge', () => {
    const count = (src.match(/transportBadge\(pkt\.route_type\)/g) || []).length;
    assert.ok(count >= 3,
      `feed rendering should call transportBadge(pkt.route_type) in at least 3 places (found ${count})`);
  });

  test('node detail recent packets include transport badge', () => {
    assert.ok(src.includes('transportBadge(p.route_type)'),
      'node detail recent packets should call transportBadge(p.route_type)');
  });
}

// ===== SUMMARY =====

@@ -107,6 +107,7 @@ function loadPacketsSandbox() {
  // Load dependencies first
  loadInCtx(ctx, 'public/roles.js');
  loadInCtx(ctx, 'public/app.js');
  loadInCtx(ctx, 'public/packet-helpers.js');
  // HopDisplay stub (simpler than loading real file which may have DOM deps)
  vm.runInContext(`
    window.HopDisplay = {
@@ -695,6 +696,26 @@ console.log('\n=== packets.js: buildFlatRowHtml ===');
    const result = api.buildFlatRowHtml(p);
    assert(result.includes('0B'));
  });

  test('buildFlatRowHtml emits data-entry-idx when provided', () => {
    const p = {
      id: 4, hash: 'z', timestamp: '', observer_id: null,
      raw_hex: 'aabb', payload_type: 0, route_type: 0,
      decoded_json: '{}', path_json: '[]'
    };
    const result = api.buildFlatRowHtml(p, 42);
    assert(result.includes('data-entry-idx="42"'));
  });

  test('buildFlatRowHtml emits data-entry-idx=-1 by default', () => {
    const p = {
      id: 5, hash: 'w', timestamp: '', observer_id: null,
      raw_hex: 'aabb', payload_type: 0, route_type: 0,
      decoded_json: '{}', path_json: '[]'
    };
    const result = api.buildFlatRowHtml(p);
    assert(result.includes('data-entry-idx="-1"'));
  });
}
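
// Why rows carry data-entry-idx: it lets a delegated click handler map a
// rendered row back to the buffer entry it came from, with -1 meaning "not
// backed by an entry". A hypothetical sketch of the consuming side
// (exampleOnRowClick is not the actual packets.js handler):
function exampleOnRowClick(event, bufferEntries) {
  const row = event.target.closest('[data-entry-idx]');
  if (!row) return null;
  const idx = parseInt(row.getAttribute('data-entry-idx'), 10);
  return idx >= 0 ? bufferEntries[idx] : null; // -1 yields null
}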

console.log('\n=== packets.js: buildGroupRowHtml ===');
@@ -740,6 +761,36 @@ console.log('\n=== packets.js: buildGroupRowHtml ===');
    assert(result.includes('👁'));
    assert(result.includes('5'));
  });

  test('buildGroupRowHtml emits data-entry-idx on header row', () => {
    const p = {
      hash: 'ei1', count: 1, latest: '2024-01-01T00:00:00Z',
      observer_id: null, raw_hex: 'aa', payload_type: 0,
      route_type: 0, decoded_json: '{}', path_json: '[]',
      observation_count: 1, observer_count: 1
    };
    const result = api.buildGroupRowHtml(p, 7);
    assert(result.includes('data-entry-idx="7"'));
  });

  test('buildGroupRowHtml emits data-entry-idx on child rows', () => {
    const ctx2 = loadPacketsSandbox();
    const api2 = ctx2._packetsTestAPI;
    // Child rows only render when the hash is in expandedHashes, which we
    // can't toggle from outside the sandbox, so this test settles for
    // verifying that the header row carries the attribute.
    const p = {
      hash: 'ei2', count: 3, latest: '2024-01-01T00:00:00Z',
      observer_id: null, raw_hex: 'aabb', payload_type: 0,
      route_type: 0, decoded_json: '{}', path_json: '[]',
      observation_count: 3, observer_count: 2,
      _children: []
    };
    const result = api2.buildGroupRowHtml(p, 15);
    assert(result.includes('data-entry-idx="15"'));
  });
}

console.log('\n=== packets.js: page registration ===');
@@ -757,6 +808,33 @@ console.log('\n=== packets.js: page registration ===');
  });
}

console.log('\n=== packets.js: _invalidateRowCounts / _refreshRowCountsIfDirty (#410) ===');
{
  const ctx = loadPacketsSandbox();
  const api = ctx._packetsTestAPI;

  test('_invalidateRowCounts and _refreshRowCountsIfDirty are exported', () => {
    assert(typeof api._invalidateRowCounts === 'function');
    assert(typeof api._refreshRowCountsIfDirty === 'function');
  });

  test('_invalidateRowCounts does not throw', () => {
    api._invalidateRowCounts();
  });

  test('_refreshRowCountsIfDirty does not throw when no display packets', () => {
    api._invalidateRowCounts();
    api._refreshRowCountsIfDirty();
  });

  test('_cumulativeRowOffsets returns valid offsets after invalidation cycle', () => {
    // Even with no display packets, should return valid array
    const offsets = api._cumulativeRowOffsets();
    assert(Array.isArray(offsets));
    assert(offsets[0] === 0);
  });
}
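
// The invalidate/refresh pair above suggests a dirty-flag cache over
// cumulative row offsets. A minimal sketch of the pattern (all example*
// names are hypothetical; the real cache is private to packets.js):
let exampleOffsets = [0];
let exampleOffsetsDirty = true;
function exampleInvalidateRowCounts() { exampleOffsetsDirty = true; }
function exampleRefreshRowCountsIfDirty(rowCounts) {
  if (!exampleOffsetsDirty) return exampleOffsets; // cache hit, skip the work
  exampleOffsets = [0]; // offsets[0] is always 0, even with no packets
  for (const n of (rowCounts || [])) {
    exampleOffsets.push(exampleOffsets[exampleOffsets.length - 1] + n);
  }
  exampleOffsetsDirty = false;
  return exampleOffsets;
}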

// ===== SUMMARY =====
console.log(`\n${'='.repeat(40)}`);
console.log(`packets.js tests: ${passed} passed, ${failed} failed`);