Compare commits

...

271 Commits

Author SHA1 Message Date
Jade
45e4053883 fix: Don't break when encountering the server user, as there may be real users after it 2025-11-10 23:56:02 +00:00
Jade Ellis
c0b617f4f1 feat(sentry): Include the commit hash in the release name 2025-11-10 16:57:24 +00:00
Jade Ellis
a28cfd284b chore(deps): Upgrade tracing / telemetry ecosystem
We no longer need the tracing patches, so I've removed those and
unpinned them in renovate.

otel's jaeger propagator is deprecated too, so it's replaced with the
builtin W3C TraceContext propagator
2025-11-10 16:42:28 +00:00
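
For reference, a minimal sketch of the propagator swap described above, assuming the opentelemetry 0.31 API:

```rust
use opentelemetry::global;
use opentelemetry_sdk::propagation::TraceContextPropagator;

fn init_propagation() {
    // The deprecated Jaeger propagator is replaced by the SDK's builtin
    // W3C TraceContext propagator (`traceparent`/`tracestate` headers).
    global::set_text_map_propagator(TraceContextPropagator::new());
}
```
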
Jade Ellis
a5b9cb69bd fix(deps): Pin hyper-util back to the patched version 2025-11-10 15:56:09 +00:00
Renovate Bot
3c8f252a14 chore(deps): update opentelemetry-rust monorepo to 0.31.0 2025-11-10 05:03:16 +00:00
Jade
8a63818f31 feat: Enable sentry compilation feature 2025-11-10 01:33:50 +00:00
Renovate Bot
5b5e26e529 chore(deps): update dependency cargo-bins/cargo-binstall to v1.15.10 2025-11-09 19:05:26 +00:00
aviac
866769c054 chore: replace serde-yml with serde-saphyr
- serde-yml has an un-addressed [security issue][sec-issue]
- [saphyr][saphyr] is a pretty recent and active crate that deals with YAML parsing
- based on that, someone recently created [serde-saphyr][serde-saphyr]

---

The change was pretty straightforward and mostly "just a search and replace". The new crate has its `Error` type split
into serialization and deserialization errors, so I created one Continuwuity `Error` variant for each instead of just
having a single `Yaml` variant. This was already done for the `Toml` errors, so it seemed acceptable.

[sec-issue]: https://github.com/advisories/GHSA-gfxp-f68g-8x78
[saphyr]: https://github.com/saphyr-rs/saphyr
[serde-saphyr]: https://github.com/saphyr-rs/saphyr/issues/66#issuecomment-3353212289
2025-11-09 11:23:32 +01:00
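
A minimal sketch of the error split described above; the variant shapes and the serde-saphyr module paths are assumptions for illustration, not the actual Continuwuity definitions:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
pub enum Error {
    // One variant per serde-saphyr error type, mirroring the existing
    // split `Toml` variants. The `ser::Error`/`de::Error` paths are
    // assumed here for illustration.
    #[error("YAML serialization error: {0}")]
    YamlSer(#[from] serde_saphyr::ser::Error),
    #[error("YAML deserialization error: {0}")]
    YamlDe(#[from] serde_saphyr::de::Error),
}
```
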
Renovate Bot
2e3b71f5f1 chore(deps): update rust-patch-updates 2025-11-08 23:57:36 +00:00
Jade
1312d61141 revert f7867cf6ca
revert ci: Clean up old images
2025-11-08 23:56:02 +00:00
Jade Ellis
f7867cf6ca ci: Clean up old images 2025-11-08 23:29:25 +00:00
Jade Ellis
2ca6887a5d chore(ci): Fix merge error 2025-11-08 23:08:10 +00:00
Jade Ellis
368685f8cd ci: Re-run mirror script when files change 2025-11-08 23:00:37 +00:00
Jade Ellis
ad2d192b94 ci: Use PATs for github registry
https://stackoverflow.com/questions/76821352/how-can-you-authenticate-to-the-github-container-registry-using-a-github-app

thx github
2025-11-08 23:00:31 +00:00
Jade Ellis
3214e94cdb ci: Mirror to ghcr 2025-11-08 22:59:27 +00:00
timedout
37c537379d chore(ci): Add git.nexy7574.co.uk image mirror (#1149)
secrets were added to the org

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1149
Co-authored-by: timedout <git@nexy7574.co.uk>
Co-committed-by: timedout <git@nexy7574.co.uk>
2025-11-08 22:56:16 +00:00
Jade Ellis
3c01c5f085 chore: Don't try to update patched deps automatically 2025-11-08 21:17:04 +00:00
Renovate Bot
4c552bb8ca chore(deps): update pre-commit hook pre-commit/pre-commit-hooks to v6 2025-11-08 20:56:00 +00:00
Jade Ellis
ce73d29855 chore: Fix typos 2025-11-08 20:54:49 +00:00
Renovate Bot
d6e314744b chore(deps): update pre-commit hook crate-ci/typos to v1.39.0 2025-11-08 14:34:13 +00:00
Jade
ec603188de ci: Enable pre-commit in renovate 2025-11-08 14:31:35 +00:00
timedout
fbf48addc7 fix(user_can): Fix room creators being unable to redact events in v12 rooms 2025-10-27 14:34:50 +00:00
nexy7574
cbf726580f fix: Kicks in !v12 are impossible 2025-10-27 14:34:50 +00:00
nexy7574
28f258fc8c fix: Incorrect interpretation of 5.5.4 2025-10-27 14:34:50 +00:00
nexy7574
8b3acfd770 fix: Inverted creatorship check 2025-10-27 14:34:50 +00:00
nexy7574
a581e8de01 fix: Don't check restricted join rules for invite joins 2025-10-27 14:34:50 +00:00
nexy7574
7c74db5e74 fix: Weird re-application of partially resolved state 2025-10-27 14:34:50 +00:00
nexy7574
b17b4235f3 fix: Unbans and kicks incorrectly checked creatorship in !v12 2025-10-27 14:34:50 +00:00
aviac
ec3564e8aa chore: use upstream rust-jemalloc-sys-unprefixed after flake.lock update 2025-10-27 12:55:21 +00:00
aviac
9a887ac04b chore: fix CI to make all checks green
- define a nix default package
- try to fix CI
- fix/improve (?) CI even more (??)
2025-10-27 12:55:21 +00:00
aviac
fed808a3c6 feat: add taplo.toml to check now that we have it 2025-10-27 12:55:21 +00:00
aviac
37983b33a2 feat: add treefmt 2025-10-27 12:55:21 +00:00
aviac
1b2224fac6 feat: add hydra jobs to build all packages 2025-10-27 12:55:21 +00:00
aviac
c1c165ab48 fix: apply rocksdb changes in checks and shell 2025-10-27 12:55:20 +00:00
aviac
68bea1816f feat(nix): flake-parts, first draft 2025-10-27 12:55:20 +00:00
Odd E. Ebbesen
cb7875e479 fix(#1134): Update docs and implementation of admin media delete-past-remote-media (#1136)
Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1136
Co-authored-by: Odd E. Ebbesen <git@oddware.net>
Co-committed-by: Odd E. Ebbesen <git@oddware.net>
2025-10-27 12:31:25 +00:00
Jade Ellis
910a3182f7 fix: Prevent crash on process exit on MacOS 2025-10-26 17:42:08 +00:00
Jade Ellis
05886f8dcb feat: Add option to control WAL compression
Also enables zstd compression by default
2025-10-26 17:30:42 +00:00
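
A sketch of what such a toggle might map to in rust-rocksdb, assuming the `set_wal_compression_type` option (RocksDB restricts WAL compression to none or zstd):

```rust
use rocksdb::{DBCompressionType, Options};

fn apply_wal_compression(opts: &mut Options, enabled: bool) {
    // RocksDB only accepts None or Zstd for the WAL, hence a boolean
    // toggle, with zstd as the default per the commit above.
    opts.set_wal_compression_type(if enabled {
        DBCompressionType::Zstd
    } else {
        DBCompressionType::None
    });
}
```
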
timedout
cff3c27729 fix: Bump ruwuma, export new route, config loading 2025-10-24 16:37:22 -04:00
Kierre
80be2ca22c Repair 2025-10-24 16:07:06 -04:00
Kierre
d133b6c0c3 feat: set MSC4373 values 2025-10-24 15:33:16 -04:00
Ginger
a3592bd3b7 feat: Make a few improvements to the systemd unit
- Use systemd's credential system to supply our config file
- Remove `ConfigurationDirectory` to prevent conflicts with package managers
- Set `config_reload_signal` to true using an envvar
2025-10-17 13:37:42 +00:00
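
Under systemd's credential system, the service reads its config from `$CREDENTIALS_DIRECTORY` rather than a fixed path; a hedged sketch, where the credential name and fallback path are assumptions:

```rust
use std::{env, fs, io, path::PathBuf};

fn config_path() -> PathBuf {
    // LoadCredential= exposes files under $CREDENTIALS_DIRECTORY at runtime.
    // The credential name and the fallback path here are illustrative only.
    match env::var_os("CREDENTIALS_DIRECTORY") {
        Some(dir) => PathBuf::from(dir).join("config.toml"),
        None => PathBuf::from("/etc/continuwuity/continuwuity.toml"),
    }
}

fn load_config() -> io::Result<String> {
    fs::read_to_string(config_path())
}
```
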
Ginger
70e8e96302 fix: Use mode 600 for config files on Fedora because they contain secret info 2025-10-17 13:37:42 +00:00
timedout
6002edccd3 perf: Remove extraneous policy server check 2025-10-16 23:57:07 +01:00
timedout
d189004d65 feat: Add more granular controls for policy server calling (#1127)
Adds two new toggles to the configuration: the first disables policy server checks entirely, and the second disables checking locally created events. Both are enabled by default for maximum policy server efficacy. Allowing them to be disabled lets people who frequently cannot contact policy servers (for example, those in censored countries) still use rooms in peace, lets single-user or trusted-only homeservers skip the preliminary check on their own events, and gives an escape hatch in case an issue like #1060 happens again, especially with MSCs not in FCP being moving targets.

In future, I think we should gate all MSC implementations behind config flags, even if they default to on.

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1127
Reviewed-by: Jade Ellis <jade@ellis.link>
Co-authored-by: timedout <git@nexy7574.co.uk>
Co-committed-by: timedout <git@nexy7574.co.uk>
2025-10-16 22:45:23 +00:00
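
A sketch of how two such toggles could be declared with serde defaults; the key names are illustrative, not the actual config options:

```rust
use serde::Deserialize;

fn true_fn() -> bool { true }

#[derive(Deserialize)]
pub struct Config {
    /// Disable to skip policy server checks entirely.
    #[serde(default = "true_fn")]
    pub policy_server_enabled: bool,
    /// Disable to skip the preliminary check on locally created events.
    #[serde(default = "true_fn")]
    pub policy_server_check_local_events: bool,
}
```
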
timedout
26b700bf51 fix: Policy server calls use the correct JSON object (#1126)
Fixes #1060

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1126
Reviewed-by: Jacob Taylor <aranjedeath@noreply.forgejo.ellis.link>
Co-authored-by: timedout <git@nexy7574.co.uk>
Co-committed-by: timedout <git@nexy7574.co.uk>
2025-10-16 21:06:54 +00:00
Renovate Bot
09f24745c3 chore(deps): lock file maintenance 2025-10-15 19:05:50 +00:00
Renovate Bot
7ffbbe6890 chore(deps): update https://github.com/actions/setup-node action to v6 2025-10-15 18:55:02 +00:00
Renovate Bot
ad94c112fe chore(deps): update rust-patch-updates 2025-10-15 17:55:58 +00:00
Jade
8c7cc68cbf fix(ci): Don't use shallow clone when we're comparing git history 2025-10-15 12:53:15 +00:00
Ginger
dc047b635f feat: Send notifications to systemd when a reload is triggered 2025-10-15 03:12:25 +00:00
Renovate Bot
cc4c2fed25 chore(deps): lock file maintenance 2025-10-13 12:05:52 +00:00
Renovate Bot
17e47ecd6d chore(deps): update github-actions-non-major 2025-10-13 11:27:22 +00:00
Jade
b1d5ff477b chore: Update renovate config
- Limit renovate updates to mondays
- Don't group lock updates
- Update checksums if possible
2025-10-13 11:26:26 +00:00
Renovate Bot
d6dc01ac2c chore(deps): update https://code.forgejo.org/actions/checkout action to v5 2025-10-13 10:41:20 +00:00
Jimmy Brush
77ebe0d02f fix(!714): Off-by-one in v5 sync
Simplified sliding sync specifies ranges to be inclusive while rust ranges are
exclusive.
2025-10-13 10:28:19 +00:00
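
The fix boils down to treating the requested range as inclusive on both ends; a minimal sketch:

```rust
/// Apply an inclusive sliding-sync range `[start, end]` to a room list.
/// Using the exclusive `start..end` here would silently drop the last
/// requested entry: the off-by-one this commit fixes.
fn apply_range<T>(items: &[T], start: usize, end: usize) -> &[T] {
    if items.is_empty() || start >= items.len() {
        return &[];
    }
    let end = end.min(items.len() - 1);
    if start > end {
        return &[];
    }
    &items[start..=end]
}
```
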
Renovate Bot
81e3d4c905 chore(deps): update dependency cargo-bins/cargo-binstall to v1.15.7 2025-10-13 10:27:18 +00:00
nexy7574
cb8f36444c feat: Proactively read Content-Length to reject oversized uploads 2025-10-12 19:42:57 +00:00
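
The proactive check only needs the declared header before any bytes are read; a sketch, where the error message is illustrative of Matrix's `M_TOO_LARGE` response:

```rust
/// Reject an upload up front when the declared Content-Length exceeds the
/// configured cap, rather than streaming until the size limit trips.
fn precheck_upload(content_length: Option<u64>, max_request_size: u64) -> Result<(), &'static str> {
    match content_length {
        Some(len) if len > max_request_size => Err("M_TOO_LARGE: request body too large"),
        // Absent header (e.g. chunked encoding) still relies on the
        // streaming limiter further down the stack.
        _ => Ok(()),
    }
}
```
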
nexy7574
799def70dc feat: Produce even more informative errors when saving media fails 2025-10-12 19:42:57 +00:00
nexy7574
20f741d0e5 feat: Produce a more informative error when uploading media fails 2025-10-12 19:42:57 +00:00
Renovate Bot
d38f4a24f2 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.146.0 2025-10-11 05:03:03 +00:00
Renovate Bot
6604cc4df9 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.144.1 2025-10-10 05:01:39 +00:00
Renovate Bot
89aa4d1eae chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.143.1 2025-10-09 05:03:56 +00:00
Renovate Bot
9231ea5114 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.141.0 2025-10-08 05:01:41 +00:00
Renovate Bot
4a3c72338d chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.138.1 2025-10-07 05:02:54 +00:00
Renovate Bot
ab862f4383 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.135.5 2025-10-06 05:01:26 +00:00
Renovate Bot
bd43be931a chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.135.4 2025-10-05 05:03:52 +00:00
Ginger
148240cbbb fix: Add missing ldap3 feature 2025-10-01 18:55:30 +00:00
Renovate Bot
2e9e42d9ae chore(deps): update rust crate ldap3 to 0.12.0 2025-10-01 18:55:30 +00:00
Renovate Bot
89fbda0d6e chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.132.5 2025-10-01 05:03:28 +00:00
Renovate Bot
c97eb5c889 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.132.2 2025-09-30 05:01:26 +00:00
Ginger
366ec46b26 fix: Upload debs built on a schedule 2025-09-29 14:17:44 +00:00
ginger
62a98ebc71 fix: Upload RPMs built on a schedule 2025-09-29 14:17:44 +00:00
Renovate Bot
439c605efe chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.131.9 2025-09-29 05:03:13 +00:00
Renovate Bot
32df2f3487 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.131.8 2025-09-28 05:03:46 +00:00
Renovate Bot
692da7ffc2 chore(deps): update dependency cargo-bins/cargo-binstall to v1.15.6 2025-09-27 16:17:44 +00:00
Renovate Bot
1082b24b1d chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.131.6 2025-09-27 05:03:28 +00:00
nexy7574
f45ceedb8a fix(upgrade): Potentially resolve CI clippy errors
I'm not convinced this isn't a rust bug itself,
but CI was complaining about lifetimes
and those complaints couldn't be reproduced locally,
so this should probably fix it maybe?
2025-09-26 18:47:49 +01:00
nexy7574
d614e43981 fix(stateres): Creators can always unban
Also basically rewrote all of the event auth logs to be more digestible
2025-09-26 18:47:49 +01:00
Renovate Bot
1e0e7a31aa chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.131.2 2025-09-26 05:02:43 +00:00
Renovate Bot
92fffe9c82 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.130.1 2025-09-25 08:28:06 +00:00
Renovate Bot
11e51300a5 chore(deps): update github-actions-non-major 2025-09-25 08:16:34 +00:00
Jade Ellis
ef84e1bb02 fix(v12): Create tombstone event on room upgrade 2025-09-25 08:15:23 +00:00
nexy7574
1887d58df8 fix: V12 room upgrades 2025-09-25 08:15:23 +00:00
nexy7574
c66f6f8900 fix(stateres): Correctly fetch missing auth events for incoming PDUs 2025-09-25 02:54:00 +01:00
Ginger
902fe7b7ab fix: Fix panic in debug builds caused by MSC4133 migration 2025-09-24 16:45:11 -04:00
Renovate Bot
472e1fee17 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.127.2 2025-09-24 05:03:46 +00:00
Jade Ellis
3c6f2d07e0 ci: Only run RPM builds on tags or schedule 2025-09-23 22:16:46 +01:00
ginger
43254aa396 fix: Attempt to enable RPM signing 2025-09-23 22:15:01 +01:00
Tom Foster
48ebf86335 feat(ci): Add Fedora RPM package build workflow
Build and publish RPM packages for Fedora using rpkg and official
rust-packaging macros. Packages are automatically signed by Forgejo's
built-in package registry (introduced in v9.0).

Publishes packages to organised groups:
- continuwuity (binary): base group (stable/dev/branch-name)
- continuwuity-debuginfo: GROUP-debug
- continuwuity (source RPM): GROUP-src

Workflow triggers on pushes to relevant paths and version tags (v*).
Tagged releases use clean version numbers (v1.2.3 becomes 1.2.3-1)
while branch builds use sanitised branch name versioning.

Uses dnf builddep to install build dependencies directly from the
generated SRPM, ensuring consistency between CI and spec file without
duplication. This also prevents hiding packaging issues that could
occur with --nodeps fallbacks.
2025-09-23 22:15:00 +01:00
Ginger
f1e3b4907e Build Debian packages and upload them to Forgejo's repository (#996)
This uses the existing `cargo-deb` metadata.

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/996
Reviewed-by: Tom Foster <tom@tcpip.uk>
Reviewed-by: nex <nex@noreply.forgejo.ellis.link>
Co-authored-by: Ginger <ginger@gingershaped.computer>
Co-committed-by: Ginger <ginger@gingershaped.computer>
2025-09-23 19:53:37 +00:00
Jade Ellis
9346a0d05e fix(ci): Typo 2025-09-23 12:55:04 +01:00
nyanbinary
c99faae115 chore(nix): bump rocksdb version in flake.nix to 10.5.fb 2025-09-23 06:31:10 +00:00
Renovate Bot
a5aa68ee8d chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.125.2 2025-09-23 03:53:03 +00:00
Tom Foster
8959ac06ac ci: Split Rust build cache into dependencies and incremental caches
Replace single large build cache with separate dependencies and incremental
caches. Dependencies cache survives source code changes and uses tiered
restore keys. Removes build directory from caching to improve CI performance
while maintaining effective compilation caching with sccache.
2025-09-23 04:30:35 +01:00
Tom Foster
47f7ebfd68 fix: Use node_version in npm cache key for wrangler installation
Replace hashFiles('**/package-lock.json') with node_version since wrangler
is installed via npm without a lockfile to hash. Removes trailing dash from
cache keys and ensures npm dependencies are regenerated when Node.js version
changes.
2025-09-23 04:30:35 +01:00
Tom Foster
7d91f218b1 ci: Migrate to detect-versions with namespaced cache keys
Replace local detect-runner-os action with external detect-versions@v1 to
reduce custom action maintenance. Add architecture detection for future
cross-platform support and namespace all cache keys with "continuwuity-"
prefix to prevent collisions with other projects on shared runners.

Updates cache mount IDs in Dockerfiles to match the new namespacing
convention, ensuring consistent cache isolation across CI and Docker builds.
2025-09-23 04:30:22 +01:00
Jade Ellis
e5e2db37d9 ci: Run image release workflow on tag 2025-09-22 17:03:26 +01:00
Jade Ellis
e08ea3b9e5 ci: Trace commands to push docker manifests 2025-09-22 17:03:26 +01:00
Jade Ellis
4f1907abfa ci: Change tag generation to use suffix flavour 2025-09-22 17:03:26 +01:00
Ginger
92d74c293e feat: Advertise support for MSC4155 2025-09-22 11:33:45 -04:00
Renovate Bot
3fbdced0e1 chore(deps): update github-actions-non-major 2025-09-22 05:04:03 +00:00
nexy7574
b70470fa71 fix: Event filters all non-state events 2025-09-21 20:10:36 +01:00
nexy7574
703d6a2075 chore: Bump version to rc.8 2025-09-21 18:17:24 +01:00
Savyasachee Jha
5b75e21810 Update resolv-conf to upstream 0.7.5 2025-09-21 17:13:38 +00:00
Ginger
13b7538785 Add support for MSC4155 (#1013)
[rendered msc here](https://github.com/Johennes/matrix-spec-proposals/blob/johannes/invite-filtering/proposals/4155-invite-filtering.md). Closes #836.

Co-authored-by: nexy7574 <git@nexy7574.co.uk>
Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1013
Reviewed-by: nex <nex@noreply.forgejo.ellis.link>
Co-authored-by: Ginger <ginger@gingershaped.computer>
Co-committed-by: Ginger <ginger@gingershaped.computer>
2025-09-21 17:03:40 +00:00
Renovate Bot
9745bcba1c chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.121.4 2025-09-21 05:02:02 +00:00
nexy7574
c9c79fbea6 fix: Fix restricted join rules inconsistencies 2025-09-20 21:07:13 +00:00
nexy7574
92e9802340 style: Tidy up 1054 2025-09-20 21:07:00 +00:00
nexy7574
1d80b7ce0c fix: Don't perform local join when there's no remote servers 2025-09-20 21:07:00 +00:00
Jade
563b6d4b30 fix: Update debug assertion with new serde type location
Fixes !1052
2025-09-20 18:04:16 +00:00
Renovate Bot
e86fc6d9f8 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.119.5 2025-09-20 05:03:27 +00:00
Renovate Bot
13adea6498 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.118.1 2025-09-19 10:31:58 +00:00
Renovate Bot
17d0bb6cf6 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.117.0 2025-09-18 21:06:35 +00:00
Renovate Bot
6dc5051fa6 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.116.10 2025-09-18 19:26:39 +00:00
Renovate Bot
3034c03ad1 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.116.8 2025-09-18 13:36:19 +00:00
Renovate Bot
fa6f549d39 chore(deps): lock file maintenance 2025-09-18 13:32:26 +00:00
Renovate Bot
999217b0f6 chore(deps): update dependency cargo-bins/cargo-binstall to v1.15.5 2025-09-18 13:31:48 +00:00
Renovate Bot
74fccff2cc chore(deps): update github-actions-non-major 2025-09-18 13:31:19 +00:00
Shuroii
7a56a2462c fix(ci): Use github env namespace as forgejo is still unsupported 2025-09-18 13:30:50 +00:00
Ginger
458811f241 fix: Fix nexy's very accurate and not-at-all busted fix to my fix 2025-09-17 20:04:50 -04:00
nexy7574
0672ce5b88 style: Fix clippy lint errors 2025-09-17 23:54:09 +01:00
Ginger
7f287c7880 fix: Use a database migration to fix corrupted us.cloke.msc4175.tz fields
(cherry picked from commit 4a893ce4cc81487bcf324dccefd8184ddef5b215)
2025-09-17 23:14:07 +01:00
Shuroii
9142978a15 fix: Fully qualify action
This fixes an issue where Forgejo tries to look on code.forgejo.org for the action despite it not being available there.
2025-09-17 21:37:50 +00:00
Shuroii
a8eb9c47f8 feat(ci): Add a workflow to update flake hashes
This workflow is intended to be run as a dispatch whenever the rocksdb fork changes!
Other than that, it'll run on any toolchain changes (rust-toolchain.toml, Cargo.lock, Cargo.toml) and update the relevant hash accordingly.
2025-09-17 21:37:50 +00:00
nexy7574
9f18cf667a chore: Temporarily disable bad tests 2025-09-17 22:25:04 +01:00
nexy7574
7e4071c117 Implement room v12 (#943)
**Does not yet work!** Currently, state resolution does not correctly resolve conflicting states. Everything else appears to work as expected, so stateres will be fixed soon, then we should be clear for takeoff.

Also: a lot of things currently accept a nullable room ID that really just don't need to. This will need tidying up before merge. Some authentication checks have also been disabled temporarily but nothing important.

A lot of things are tagged with `TODO(hydra)`; those need resolving before merge. External contributors should PR to the `hydra/public` branch, *not* `main`.

---

This PR should be squash merged.

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/943
Co-authored-by: nexy7574 <git@nexy7574.co.uk>
Co-committed-by: nexy7574 <git@nexy7574.co.uk>
2025-09-17 20:46:03 +00:00
Renovate Bot
51423c9d7d chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.115.6 2025-09-17 05:03:46 +00:00
Ginger
a0b0ff9d5c fix: Remove legacy check for u. prefix 2025-09-16 11:30:39 +00:00
Ginger
8e27d74c4a fix: Slightly more parallelism 2025-09-16 11:30:39 +00:00
Ginger
d6b1055683 fix: Remove needless async marker 2025-09-16 11:30:39 +00:00
Ginger
c9117e6ee4 fix: Fix incorrect deserialization of MSC4133 profile fields 2025-09-16 11:30:39 +00:00
Ginger
e3415a500d chore: Code cleanup 2025-09-16 11:30:39 +00:00
Ginger
e6fd3c970b fix: Nuke explicit references to the MSC4175 tz profile field 2025-09-16 11:30:39 +00:00
Renovate Bot
6b7f35a8b8 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.115.0 2025-09-16 05:01:56 +00:00
Tom Foster
a120a4fa95 fix: Handle runner cargo bin path migration in timelord action
Runner images have migrated from /usr/share/rust/.cargo/bin to standard
~/.cargo/bin location. Action now checks old location first and migrates
binaries if found, maintaining compatibility with both paths.

Bump cache key to v3 to ensure fresh binary cache after path changes.
2025-09-15 16:17:32 +01:00
Renovate Bot
f872210b20 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.113.4 2025-09-15 05:01:40 +00:00
Renovate Bot
3dd04bd9df chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.113.2 2025-09-14 05:03:21 +00:00
Ginger
af45c348a4 fix: Properly deserialize changes to legacy fields made with MSC4133 endpoints 2025-09-14 01:28:08 +00:00
nexy7574
36dabecb82 chore(1014): Include MSC4155 in build features to resolve build errors 2025-09-14 00:53:43 +00:00
nexy7574
50cd1081ba chore(1014): Bump ruwuma 2025-09-14 00:53:43 +00:00
nexy7574
14df55e5c5 style(1014): Remove unnecessary commented code 2025-09-14 00:53:43 +00:00
nexy7574
d9d0d1a465 fix(!1014): Don't prematurely return during registration 2025-09-14 00:53:43 +00:00
Tom Foster
81b6b3547c fix: Resolve Forgejo runner v11 matrix job execution failure
Matrix jobs stopped starting after upgrading from runner v9 to v11 due to
changes in job dependency resolution. Remove redundant define-variables job
that computed static image paths and replace with IMAGE_PATH environment
variable.

Also fix timelord action binary caching for compatibility between different
runner images that install cargo binaries in different locations.
2025-09-13 17:12:09 +01:00
Renovate Bot
0bbc3c4e05 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.112.0 2025-09-12 21:11:13 +00:00
Jade
0f09fa3d31 chore(renovate): Specify automerge strategy 2025-09-12 21:02:25 +00:00
Tom Foster
3d5355dfc3 chore(renovate): Add auto-merge for renovatebot and reorganise package rules
Enable automatic merging of ghcr.io/renovatebot/renovate docker image updates
to reduce manual maintenance overhead.

Reorganise package rules by manager type (cargo, github-actions, docker) and
add missing description for cargo concurrency limit rule to improve config
maintainability.
2025-09-12 17:50:08 +01:00
Renovate Bot
2547eb3a90 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.109.0 2025-09-12 13:29:47 +00:00
Renovate Bot
51ba41823f chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.106.0 2025-09-12 13:23:28 +00:00
Tom Foster
542dff50bd ci: Split Docker builds into sequential release and max-perf stages
Separate fast release builds from slow max-perf builds to optimise runner
utilisation and provide quicker feedback. Release builds complete first with
standard optimisations, followed by Haswell-optimised dragrace builds once
the safe builds pass successfully.

Extract build logic into focused composite actions for better log visibility
in Forgejo UI. Split monolithic build action into prepare-docker-build,
inline docker build step, and upload-docker-artifacts to ensure each phase
completes independently and shows logs immediately.

Creates separate manifests at each stage to avoid waiting for all builds
before publishing.
2025-09-12 12:43:19 +01:00
Tom Foster
9c147b182f ci: Fix BuildKit cache invalidation and add Haswell-optimised builds
The workflow was rebuilding dependencies unnecessarily despite timelord
restoring timestamps because TARGET_CPU and RUST_PROFILE weren't passed
to Docker, creating inconsistent cache keys. Now passes both arguments
for proper cache reuse.

Adds Haswell-optimised builds alongside baseline builds using -march=haswell
for PCLMUL instruction support. Recent build improvements reducing compile
times from 15-20 minutes to ~5 minutes make this additional CPU variant
feasible. Users can pull optimised images with -haswell suffix.
2025-09-11 13:59:43 +01:00
Renovate Bot
7e76ca45c1 chore(deps): lock file maintenance 2025-09-11 12:28:11 +00:00
Tom Foster
5126cb4554 fix: Use forgejo/upload-artifact@v4 for artifact consistency
Follow-on to correct #1009. The previous fix downgraded upload-artifact
to v3 but kept download-artifact@v4, creating incompatible storage
formats that prevented artifact pattern filtering from working.

Update all upload-artifact actions to v4 and adjust renovate
configuration to disable automatic updates for forgejo artifact
actions to maintain version consistency.
2025-09-11 11:57:04 +01:00
Renovate Bot
4d05d0f677 chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.99.9 2025-09-11 09:56:48 +00:00
Tom Foster
0673ac1a6c fix: Fix artifact action compatibility and add digest debugging
Resolve upload-artifact v4 GHES compatibility errors by downgrading to v3.
Switch to standard forgejo/download-artifact@v4 for pattern filtering support.
Update renovate configuration to prevent future incompatible upgrades.

Add diagnostic output to digest export step to troubleshoot zero-byte
artifact uploads preventing manifest creation. Include CI triggers for
Element workflow to test changes in pull requests.
2025-09-11 10:44:11 +01:00
Jade Ellis
ad11417145 chore(deps): Replace serde_yaml with serde_yml 2025-09-10 20:20:45 +01:00
Renovate Bot
0de904ffe4 chore(deps): update rust crate const-str to 0.7.0 2025-09-10 18:05:00 +00:00
Renovate Bot
d74b9de221 chore(deps): update dependency cargo-bins/cargo-binstall to v1.15.4 2025-09-10 17:44:44 +00:00
Renovate Bot
e7ac5988cb chore(deps): update https://github.com/actions/setup-node action to v5 2025-09-10 17:06:45 +00:00
Jade Ellis
571f05017c chore: Update resolv git hash 2025-09-10 17:50:37 +01:00
Jade Ellis
a339e73eb5 chore: Unify actions versions 2025-09-10 17:39:25 +01:00
Jade Ellis
72b78ed6d4 chore: Fix nightly-only clippy lints 2025-09-10 17:35:17 +01:00
nexy7574
baa89586e2 fix(MSC4277): Undo refuted response changes 2025-09-10 16:25:06 +00:00
nexy7574
7ad8ff2e45 style(MSC4277): Run lints to satisfy checks 2025-09-10 16:25:06 +00:00
nexy7574
2046b1e2f6 feat(MSC4277): Unify reporting endpoint behaviours
* reporting rooms now always returns 200 OK
* reporting an event returns OK if we don't know about the reported event
* removed the score parameter (needs a followup ruwuma update)
2025-09-10 16:25:06 +00:00
Renovate Bot
2cb980cd4c chore(deps): update ghcr.io/renovatebot/renovate docker tag to v41.99.7 2025-09-10 16:16:34 +00:00
Jade Ellis
27e0ef7b2e chore: Update renovate CI
- Fixes some issues with the action
- Enables OSV vuln scanning
- Enables updating the dockerfile tool versions
2025-09-10 16:53:59 +01:00
Jade Ellis
7091882887 chore: Update cargo lockfile 2025-09-10 16:47:20 +01:00
Jade Ellis
a81546374d ci: Make timelord docker work locally 2025-09-10 16:40:55 +01:00
Tom Foster
7950e2cc7f ci: Refactor timelord action to use git-warp-time fallback
Updates the timelord action to fall back to git-warp-time when the cache
is completely empty, enabling timestamp restoration even on fresh builds.
When git-warp-time is used, performs an unshallow fetch to get full history,
while subsequent runs use normal fetches. Simplifies the interface by making
inputs optional with sensible defaults.

Adds binary caching for timelord-cli and git-warp-time tools to avoid
repeated installations, and updates paths to use /usr/share/rust/.cargo/bin/
for the catthehacker runner image used by the dind profile (may need updating
if/when switching to standard image).

The main timelord restore now happens inside the Dockerfile itself, as Docker
intentionally wipes all file mtimes on COPY/ADD operations.
2025-09-08 08:34:29 +00:00
Renovate Bot
8f186cd770 chore(deps): update https://github.com/renovatebot/github-action action to v43.0.11 2025-09-08 05:02:33 +00:00
Ginger
5d3e10a048 fix: Make RA use the full feature 2025-09-07 18:07:03 -04:00
Ginger
1e541875ad fix: Nuke src/api/client/utils.rs 2025-09-07 18:06:11 -04:00
nexy7574
90fd92977e style: Run clippy 2025-09-07 21:20:26 +00:00
Ginger
e27ef7f5ec feat: Do not persist remote PDUs fetched with admin commands 2025-09-07 21:20:26 +00:00
Ginger
16f4efa708 fix: Fix pagination tokens being corrupted for backfilled PDUs 2025-09-07 21:20:26 +00:00
Ginger
e38dec5864 fix: Put the output of !admin query room-timeline pdus in a codeblock 2025-09-07 21:20:26 +00:00
Ginger
f3824ffc3d fix: Use handle_incoming_pdu directly to keep remote PDUs as outliers 2025-09-07 21:20:26 +00:00
nexy7574
e3fbf7a143 feat: Ask remote servers for individual unknown events 2025-09-07 21:20:26 +00:00
nexy7574
09de586dc7 feat(PR977): Log more things in the join process 2025-09-07 22:01:07 +01:00
nexy7574
d1fff1d09f perf(pr977): Remove redundant ACL check in send_join 2025-09-07 22:01:07 +01:00
nexy7574
f47474d12a fix(PR977): Adjust some log levels 2025-09-07 22:01:07 +01:00
nexy7574
53da294e53 fix(PR977): Omitting redundant entries from the auth_chain caused problems 2025-09-07 22:01:07 +01:00
nexy7574
2cdccbf2fe feat(PR977): Support omitting members in the send_join response 2025-09-07 22:01:07 +01:00
Tom Foster
6cf3c839e4 ci(release-image): Skip digest upload when not pushing images
After #992, builds without registry credentials skip Docker image output
but still extract binary artifacts. However, we were still trying to
upload digests for images that weren't created. Add conditional check
to only upload digests when actually pushing to registry.
2025-09-07 21:27:56 +01:00
Tom Foster
4a1091dd06 ci(release-image): Unify binary extraction using BuildKit local output
Fork PRs currently fail binary extraction with 'invalid reference format'
and 'must specify at least one container source' errors. This replaces the
registry-specific docker create/copy method with BuildKit's local output
feature for all builds.

Uses multiple outputs in single build: image export plus local binary
extraction from /sbin. Speeds up extracting binary artifacts and saves a
couple of extra workflow steps in the process.
2025-09-07 20:46:11 +01:00
Tom Foster
1e9701f379 ci(release-image): Skip setup steps when using persistent BuildKit
When BUILDKIT_ENDPOINT is set, builds run on a persistent BuildKit instance,
making runner setup steps unnecessary. Skip Rust toolchain installation,
QEMU setup, caching steps, and timelord to eliminate ~7 operations per job.

Also adds output to git SHA and timestamp steps for visibility.

Cuts at least a minute off average build time through fewer installs,
cache restores, and cache saves.
2025-09-07 18:59:05 +01:00
Tom Foster
2cedf0d2e1 fix(ci): Use image output instead of docker for fork PRs
Docker exporter doesn't support manifest lists (multi-platform builds).
For fork PRs without registry credentials, use 'type=image,push=false'
instead of 'type=docker' to build multi-platform images locally without pushing.
2025-09-07 18:32:38 +01:00
Tom Foster
84fdcd326a fix(ci): Resolve registry push failures for fork PRs
Fork PRs now fail during Docker image build with 'tag is needed when
pushing to registry' because BUILTIN_REGISTRY_ENABLED evaluates to false
without proper credentials, leaving the images list empty. This appears
to be due to recent Forgejo permission changes affecting fork access to
repository secrets.

Add fallback to official registry when credentials unavailable, skip
registry login and push operations for forks, and make merge job
conditional since no digests exist without push. This allows forks to
test Docker builds whilst avoiding authentication failures.
2025-09-07 17:39:18 +01:00
Tom Foster
d640853f9d ci(docs): Optimise build performance with caching and conditional Node.js
Skip installing Node.js entirely if v20+ is already available, otherwise
install v22. Add npm dependency caching with OS-specific cache keys using
the custom detect-runner-os action for proper cache isolation between
runners. Dependencies normally take just under 10s, so this should more
than halve the doc build time to free up runner slots.
2025-09-07 14:51:10 +01:00
Tom Foster
fff9629b0f fix(docker): Resolve liburing.so.2 loading error for non-root users
Container failed to start when running as non-root (user 1000:1000) because
copied directories had restrictive 770 permissions, likely due to different
umask in persistent BuildKit. Non-root users couldn't access /usr/lib to
load required dynamic libraries.

Introduces prepper stage using Ubuntu to organize files into layered structure
with explicit 755 directory permissions before copying to scratch image.
Also fixes workflow syntax error and removes docker/** from paths-ignore to
ensure Docker changes trigger CI builds.
2025-09-07 14:13:14 +01:00
Tom Foster
1a3107c20a fix(ci): Replace Mozilla sccache action with token-free alternative
Replace mozilla-actions/sccache-action with a custom Forgejo-specific
implementation that eliminates GitHub token dependencies and rate limiting
issues for all contributors regardless of repository permissions.

The new action mirrors sccache binaries to the Forgejo package registry
and queries that instead of GitHub releases, maintaining identical functionality
including hostedtoolcache support.
2025-09-07 09:29:32 +01:00
aviac
969d7cbb66 feat(nix): remove rocksdb from flake.nix inputs
Consuming this flake is pretty annoying since the rocksdb input is
fetched on every build, which takes ~10-20 seconds. By removing it and
replacing it with a `pkgs.fetchFromGitea` call, we create an intermediate
derivation, which is better for caching reasons.
2025-09-06 17:40:31 +00:00
Jade Ellis
cd238b05de fix: Remove bad colon in workflow 2025-09-06 16:21:21 +01:00
Jade Ellis
c0e3829fed feat: Replace Jaeger with OTLP 2025-09-06 16:19:56 +01:00
Jade Ellis
1d7dda6cf5 chore: Upgrade ctor, cbor 2025-09-06 16:19:56 +01:00
Jade Ellis
6f19931c5b chore(deps): Upgrade minor incompatible dependencies 2025-09-06 16:19:56 +01:00
Tom Foster
2516e783ba ci: Support optional persistent BuildKit endpoints in Docker builds
Allows us to use runners with persistent BuildKit containers for improved
caching and faster build times. Falls back to standard docker-container
driver when BUILDKIT_ENDPOINT environment variable is not set.
2025-09-06 16:05:51 +01:00
Jade Ellis
fdf5771387 ci: Fix CI not triggering on external pull requests 2025-09-06 15:21:39 +01:00
Ginger
58bbc0e676 fix: Move packaging files from dist/ to pkg/ 2025-09-06 14:03:57 +00:00
Ginger
0d58e660a2 fix: Remove unnecessary user and directory modifications
systemd creates a dynamic user for
continuwuity and manages directories for
it automatically, so the debian postinst
script no longer needs to do that.
2025-09-06 14:03:57 +00:00
Ginger
e7124edb73 fix: Update debian systemd unit path 2025-09-06 14:03:57 +00:00
Ginger
d19e0f0d97 feat: Move packaging scripts into dist/ and consolidate the service files 2025-09-06 14:03:57 +00:00
nex
467aed3028 chore: Add Ginger's GH noreply email to mailmap 2025-09-02 16:36:56 +00:00
Ginger
99b44bbf09 Update conduwuit-example.toml 2025-09-01 17:50:09 +00:00
Ginger
95aeff8cdc Set the DB path as an env var in systemd service files to prevent footgunning 2025-09-01 17:50:09 +00:00
nexy7574
9e62e66ae4 chore(PR956): Update admin docs 2025-09-01 11:27:58 +00:00
nexy7574
76b93e252d feat: Only inject vias when manual ones aren't provided during join 2025-09-01 11:27:58 +00:00
nexy7574
66d479e2eb fix: Make remote leave helper a public fn 2025-09-01 11:27:58 +00:00
nexy7574
241371463e feat: Force leave remote rooms admin command 2025-09-01 11:27:58 +00:00
nexy7574
d970df5fd2 perf(MSC4323): Parallelise some check futs 2025-09-01 12:13:37 +01:00
nexy7574
4e644961f3 perf(MSC4323): Remove redundant authorisation checks 2025-09-01 12:13:37 +01:00
nexy7574
35cf9af5c8 feat(MSC4323): Add versions flag 2025-09-01 12:13:37 +01:00
nexy7574
04e796176a style(MSC4323): Satisfy our linting overlords 2025-09-01 12:13:37 +01:00
nexy7574
9783940105 feat(MSC4323): Advertise suspension support in capabilities 2025-09-01 12:13:37 +01:00
nexy7574
1e430f9470 feat(MSC4323): Implement agnostic suspension endpoint 2025-09-01 12:13:37 +01:00
Renovate Bot
5cce024841 chore(deps): update https://github.com/reproducible-containers/buildkit-cache-dance action to v3.3.0 2025-08-31 00:44:28 +00:00
Jade Ellis
e87c461b8d feat: Cache renovate data, RO GitHub token 2025-08-31 01:37:50 +01:00
Jade Ellis
b934898f51 chore: Update renovate config, limit cargo updates 2025-08-31 00:25:41 +01:00
nexy7574
83e3de55a4 fix(sync/v2): Room leaves being omitted incorrectly
Partially borrowed from 85a84f93c7
2025-08-30 16:18:46 +01:00
Tom Foster
609e239436 fix(fedora): Correct linting issues in RPM spec file
The Fedora RPM packaging files added in PR #950 weren't passing pre-commit
checks, causing CI failures for any branches rebased after that merge. This
applies prek linting fixes (typo correction, trailing whitespace removal,
and EOF newline) to ensure CI passes for all contributors.
2025-08-30 16:10:41 +01:00
Ginger
34417c96ae Update URL to point at the landing page 2025-08-28 21:10:46 +00:00
Ginger
f33f281edb Update long description to match deb package 2025-08-28 21:10:46 +00:00
Ginger
ddbca59193 Add spec and service files for creating an RPM package 2025-08-28 21:10:46 +00:00
Tom Foster
b5a2e49ae4 fix: Resolve Clippy CI failures from elided lifetime warnings
The latest Rust nightly compiler (2025-08-27) introduced the
elided-named-lifetimes lint which causes Clippy CI checks to fail
when an elided lifetime ('_) resolves to a named lifetime that's
already in scope.

This commit fixes the Clippy warnings by:
- Making lifetime relationships explicit where 'a is already in scope
- Keeping elided lifetimes ('_) in functions without explicit
  lifetime parameters
- Ensuring proper lifetime handling in the database pool module

Affected files (17 total):
- Database map modules: Handle, Key, and KeyVal references in get,
  qry, keys, and stream operations
- Database pool module: into_recv_seek function

This change resolves the CI build failures without changing any
functionality, ensuring the codebase remains compatible with the
latest nightly Clippy checks.
2025-08-28 21:13:19 +01:00
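
For context, a self-contained reproduction of the lint and the style of fix applied:

```rust
// The 2025-08-27 nightly's `elided_named_lifetimes` lint fires when an
// elided lifetime resolves to a named one already in scope:
fn first_word<'a>(s: &'a str) -> &'_ str {
    // warning: elided lifetime resolves to `'a`, which is already in scope
    s.split_whitespace().next().unwrap_or("")
}

// Fix: make the lifetime relationship explicit.
fn first_word_fixed<'a>(s: &'a str) -> &'a str {
    s.split_whitespace().next().unwrap_or("")
}
```
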
Jade Ellis
37248a4f68 chore: Add reasons for test skips 2025-08-28 20:10:05 +01:00
Tom Foster
dd22325ea2 refactor(ci): Consolidate Rust checks with optimised toolchain setup
Merge rust-checks.yml into prek-checks.yml for a unified workflow that
runs formatting and clippy/test checks in parallel jobs.

Add reusable composite actions:
- setup-rust: Smart Rust toolchain management with caching
  * Uses cargo-binstall for pre-built binary downloads
  * Integrates Mozilla sccache-action for compilation caching
  * Workspace-relative paths for better cache control
  * GitHub token support for improved rate limits
- setup-llvm-with-apt: LLVM installation with native dependencies
- detect-runner-os: Consistent OS detection for cache keys

Key improvements:
- Install prek via cargo-binstall --git (crates.io outdated at v0.0.1)
- Download timelord-cli from cargo-quickinstall
- Set BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10 to avoid rate limit delays
- Default Rust version 1.87.0 with override support
- Remove redundant sccache stats (handled by Mozilla action)

Significantly reduces CI runtime through binary downloads instead of
compilation while maintaining all existing quality checks.
2025-08-28 19:20:14 +01:00
nex
30a56d5cb9 Update renovate.json 2025-08-28 17:15:32 +00:00
nexy7574
3183210459 fix: Post-merge compile issues 2025-08-23 21:28:31 +01:00
RatCornu
57d7743037 feat: add ldap_only config option 2025-08-23 19:59:36 +00:00
Jade Ellis
cb09bfa4e7 fix: Correctly pass ldap feature from the default crate 2025-08-23 19:59:36 +00:00
Jade Ellis
0ed691edef fix: Make builds without LDAP work correctly 2025-08-23 19:59:36 +00:00
Jade Ellis
c58b9f05ed chore: Fix default attributes for config 2025-08-23 19:59:36 +00:00
RatCornu
fb7e739b72 chore: remove unused LDAP mail attribute 2025-08-23 19:59:36 +00:00
RatCornu
c7adbae03f feat: ldap login 2025-08-23 19:59:36 +00:00
Jade Ellis
8b35de6a43 chore: Fix clippy lints with minimal diff 2025-08-22 00:51:54 +01:00
aviac
d191494f18 chore(nix): update fenix input
This is required, since we're now installing `rustfmt` from the latest
state of the fenix repo. The previous input wasn't recent enough for the
latest rust version; it was locked at 2025-07-02. Now it's up to date.
2025-08-22 00:37:16 +01:00
aviac
6d1f12b22d chore(nix): make rustfmt-nightly available to default dev shell
I verified this by running `rustfmt --version` on my system. Note that I
don't have a system-wide install of rust and only rely on dev shells, so
this can't possibly come from somewhere else.

```
$ rustfmt --version
rustfmt 1.8.0-nightly (6677875279 2025-07-02)
```
2025-08-22 00:37:16 +01:00
aviac
ca3ee9224b chore(rust): drop rustfmt from rust-toolchain.toml
This just installs regular rustfmt, which is not needed in this project.
One could say "It doesn't hurt", but in the NixOS dev shell it actually
does since it will shadow nightly rustfmt and we don't have the
`cargo +nightly fmt` syntax on NixOS that is available on other distros.

Also "It doesn't hurt" to delete it for non NixOS users.
2025-08-22 00:37:16 +01:00
aviac
427b973b67 chore(rust): bump version 1.87 -> 1.89
- bump version in rust-toolchain.toml
- update sha in flake.nix
2025-08-22 00:32:04 +01:00
Tom Foster
aacaf5a2a0 fix(ci): Downgrade setup-uv action from v6 to v5
The setup-uv@v6 action has deprecated Node 18 support mid-version by
using the File API, causing workflow failures. Temporarily downgrading
to v5 until we migrate to a better runner image with Node 20+ support.
2025-08-21 21:10:15 +01:00
aviac
256bed992e chore(nix): exec 'use flake' with direnv on NixOS systems 2025-08-21 13:40:11 +02:00
aviac
ecb87ccd1c chore(nix): bump rocksdb version in flake.nix to 10.4.fb
This works without any further changes. Multiple people in the matrix
room (including myself) have reported that the built executable runs
fine with this. Nevertheless, there might be room for improvements (in
future commits)
2025-08-21 13:39:36 +02:00
Tom Foster
14a4b24fc5 fix(ci): Configure Renovate for Forgejo platform
- Set platform to 'forgejo' with proper API endpoint
- Use environment variables for all Renovate configuration
- Add git timeout and disable GitHub token warnings
- Move PR limit configuration to workflow
2025-08-17 17:37:24 +01:00
Tom Foster
731761f0fc Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 15:08:44 +00:00
Tom Foster
4524a00fc6 chore(ci): Remove obsolete prefligit action
Now using prek directly via uvx, this custom action is no longer needed.
2025-08-17 16:00:42 +01:00
Tom Foster
9db750e97c fix(ci): Add full GitHub URL to renovate action
Forgejo's runner doesn't automatically assume actions are on github.com,
so we need to specify the full URL.
2025-08-17 15:51:29 +01:00
Tom Foster
b14a4d470b Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 14:16:35 +00:00
Tom Foster
5d1f141882 ci: Rename prefligit-checks.yml to prek-checks.yml
Rename workflow file to match the updated tool name.
2025-08-17 15:13:02 +01:00
Tom Foster
b447cfff56 ci: Update prefligit to prek
The prefligit project has been renamed to prek due to typosquatting
concerns. This updates our CI to use the new name and recommended
installation method via uv, which significantly reduces setup time
compared to cargo install and includes automatic caching.

- Replace outdated static prefligit action with direct prek invocation
- Use uv as recommended by upstream: https://github.com/j178/prek
- Update check-byte-order-marker to fix-byte-order-marker (deprecated)
- Simplify workflow by removing unused ref calculations

The same .pre-commit-config.yaml works unchanged. Developers can
install locally with 'uvx prek install' or other methods from the repo.
2025-08-17 15:11:38 +01:00
Tom Foster
283888e788 Merge branch 'main' into tom/renovate 2025-08-17 13:27:33 +00:00
Tom Foster
f54e59a068 ci: Add Renovate for automated dependency management
Configures Renovate bot to create PRs for outdated dependencies.
Runs daily at 5am UTC with manual trigger via workflow_dispatch.

Configuration:
- Ignores custom forks (jemalloc, telemetry packages)
- Groups: GHA minor/patch, Rust toolchain, lockfile, Rust patches
- Limits: 3 concurrent PRs, 2 PRs per hour
- Supports: Cargo, GitHub Actions, Nix
2025-08-17 14:20:20 +01:00
Tom Foster
2a183cc5a4 fix(build): Remove hardened_malloc from full feature set
The hardened_malloc feature conflicts with jemalloc, preventing successful
builds with the --features full flag. Commenting out hardened_malloc allows
the full profile to build correctly while maintaining all other features.
2025-08-17 13:44:32 +01:00
nexy7574
54acd07555 fix: Drop fake room v2 support 2025-08-16 16:22:24 +01:00
Tom Foster
583cb924f1 refactor: address code review feedback for auth and pagination improvements
- Extract duplicated thread/message pagination functions to shared utils module
- Refactor pagination token parsing to use Option combinators instead of defaults
- Split access token generation from assignment for clearer error handling
- Add appservice token collision detection at startup and registration
- Allow appservice re-registration with same token (for config updates)
- Simplify thread relation chunk building using iterator chaining
- Fix saturating_inc edge case in relation queries with explicit filtering
- Add concise comments explaining non-obvious behaviour choices
2025-08-12 05:29:41 +01:00
Tom Foster
9286838d23 fix(relations): improve thread pagination and include root event
Replace unreliable PduCount pagination tokens with ShortEventId throughout
the relations and messages endpoints. ShortEventId provides stable, unique
identifiers that persist across server restarts and database operations.

Key improvements:
- Add token parsing helpers that try ShortEventId first, fall back to
  PduCount for backwards compatibility
- Include thread root event when paginating backwards to thread start
- Fix off-by-one error in get_relations that was returning the starting
  event in results
- Only return next_batch/prev_batch tokens when more events are available,
  preventing clients from making unnecessary requests at thread boundaries
- Ensure consistent token format between /relations, /messages, and /sync
  endpoints for interoperability

This fixes duplicate events when scrolling at thread boundaries and ensures
the thread root message is visible when viewing a thread, matching expected
client behaviour.
2025-08-10 19:12:56 +01:00
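
A minimal sketch of the "try ShortEventId first, fall back to PduCount" parsing with Option combinators; the token encodings are stand-ins, not the real formats:

```rust
// Stand-in aliases; the real ShortEventId/PduCount encodings differ.
type ShortEventId = u64;
type PduCount = i64;

enum PaginationToken {
    ShortEventId(ShortEventId),
    PduCount(PduCount),
}

/// Prefer the stable ShortEventId form; fall back to the legacy PduCount
/// form so tokens issued before this change keep working.
fn parse_token(s: &str) -> Option<PaginationToken> {
    s.parse::<ShortEventId>()
        .ok()
        .map(PaginationToken::ShortEventId)
        .or_else(|| s.parse::<PduCount>().ok().map(PaginationToken::PduCount))
}
```
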
Tom Foster
d1ebcfaf0b fix(auth): prevent token collisions and optimise lookups
Ensures access tokens are unique across both user and appservice tables to
prevent authentication ambiguity and potential security issues.

Changes:
- On startup, automatically logout any user devices using tokens that
  conflict with appservice tokens (resolves in favour of appservices)
  and log a warning with affected user/device details
- When creating new user tokens, check for conflicts with appservice tokens
  and generate a new token if a collision would occur
- When registering new appservices, reject registration if the token is
  already in use by a user device
- Use futures::select_ok to race token lookups concurrently for better
  performance (adapted from tuwunel commit 066097a8)

This fix-forward approach resolves existing token collisions on startup
whilst preventing new ones from being created, without breaking existing
valid authentications.

The find_token optimisation is adapted from tuwunel (matrix-construct/tuwunel)
commit 066097a8: "Optimize user and appservice token queries" by Jason Volk.
2025-08-10 17:10:06 +01:00
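
The concurrent lookup uses `futures::future::select_ok`, which resolves to the first `Ok` and only fails if every future errors; a sketch with stand-in table lookups:

```rust
use futures::future::{select_ok, BoxFuture};

// Stand-ins for the user-token and appservice-token table lookups.
async fn find_user_token(token: &str) -> Result<String, ()> {
    if token == "user-token" { Ok("@alice:example.org".to_owned()) } else { Err(()) }
}

async fn find_appservice_token(token: &str) -> Result<String, ()> {
    if token == "as-token" { Ok("appservice:bridge".to_owned()) } else { Err(()) }
}

/// Race both lookups concurrently; the first Ok wins, and we only fail
/// when both tables miss.
async fn find_token(token: &str) -> Result<String, ()> {
    let lookups: Vec<BoxFuture<'_, Result<String, ()>>> = vec![
        Box::pin(find_user_token(token)),
        Box::pin(find_appservice_token(token)),
    ];
    let (owner, _remaining) = select_ok(lookups).await?;
    Ok(owner)
}
```
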
Tom Foster
e820551f62 fix(appservice): create sender_localpart user during appservice startup
Fixes #813: Application services were unable to work because their sender_localpart
user was never created in the database, preventing authentication.

This fix ensures the appservice user account is created when:
- The server starts up and loads existing appservices from the database
- A new appservice is registered via the admin command

Additionally, if an appservice user has been accidentally deactivated, it will be
automatically reactivated when the appservice starts.

The solution centralises all appservice startup logic into a single `start_appservice`
helper method, eliminating code duplication between the registration and startup paths.
2025-08-10 17:10:06 +01:00
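
A hedged sketch of the centralised startup helper the commit describes; the service and method names are assumptions reconstructed from the message, with `UserId::parse_with_server_name` from ruma:

```rust
// Sketch only: `Services`, `users`, and the method names below are
// assumptions based on the commit message, not the actual API.
async fn start_appservice(services: &Services, registration: &Registration) -> Result<()> {
    let user_id = UserId::parse_with_server_name(
        registration.sender_localpart.as_str(),
        services.globals.server_name(),
    )?;
    if !services.users.exists(&user_id).await? {
        // Fixes #813: the sender_localpart account must exist for the
        // appservice to authenticate.
        services.users.create(&user_id, None)?;
    }
    if services.users.is_deactivated(&user_id).await? {
        // Reactivate accidentally deactivated appservice accounts.
        services.users.reactivate(&user_id).await?;
    }
    Ok(())
}
```
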
Yonatan Sidler
bd3db65cb2 fix(arch): fix config.toml not being loaded from LoadCredentials directory 2025-08-06 20:01:36 +03:00
nexy7574
e4a43b1a5b fix(policy-server): Call the PS later in the PDU creation process
This avoids accidentally sending partially built PDUs to the policy server,
which may cause issues with some implementations
2025-08-02 00:19:33 +01:00
Jade Ellis
5775e0ad9d docs: Make traefik router names consistent 2025-07-30 19:55:48 +01:00
Jade Ellis
238cc627e3 docs: Set traefik labels 2025-07-30 19:33:53 +01:00
Jade Ellis
b1516209c4 chore: Update funding file 2025-07-30 19:23:38 +01:00
Jade Ellis
0589884109 docs: Fix documentation link in README
Closes https://forgejo.ellis.link/continuwuation/continuwuity/issues/913
2025-07-28 19:28:34 +01:00
Jade Ellis
4a83df5b57 chore: Fix link 2025-07-25 17:35:18 +01:00
Jade Ellis
aa08edc55f chore: Release announcement 2025-07-25 17:30:31 +01:00
Jade Ellis
00c7e220bb chore: Release 2025-07-25 14:10:06 +01:00
Jade Ellis
87be4d1a52 feat: Almost-functional musl builds on Alpine
Lots of fiddling, still can't get stuff to work

Next step is a debian builder copying the static libs from alpine
2025-07-24 23:22:27 +01:00
Jade Ellis
205506f206 chore: Update deps 2025-07-24 22:18:10 +01:00
Jade Ellis
66181c61af chore: Update rocksdb, feature flag changes
Most of the way to static musl builds, just zlib I think
2025-07-24 21:51:52 +01:00
Jade Ellis
b7a0442298 feat: Musl images in docker
Not working at the moment, need to upgrade the rust-rocksdb and possibly
zstd to stop them force-enabling dynamic libclang
2025-07-24 19:00:41 +01:00
227 changed files with 8533 additions and 4366 deletions


@@ -26,3 +26,7 @@ max_line_length = 98
[*.yml]
indent_size = 2
indent_style = space
[*.json]
indent_size = 4
indent_style = space

.envrc

@@ -2,6 +2,8 @@
dotenv_if_exists
# use flake ".#${DIRENV_DEVSHELL:-default}"
if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
  use flake ".#${DIRENV_DEVSHELL:-default}"
fi
PATH_add bin


@@ -0,0 +1,108 @@
name: create-manifest
description: |
  Create and push a multi-platform Docker manifest from individual platform digests.
  Handles downloading digests, creating manifest lists, and pushing to registry.
inputs:
  digest_pattern:
    description: Glob pattern to match digest artifacts (e.g. "digests-linux-{amd64,arm64}")
    required: true
  tag_suffix:
    description: Suffix to add to all Docker tags (e.g. "-maxperf")
    required: false
    default: ""
  images:
    description: Container registry images (newline-separated)
    required: true
  registry_user:
    description: Registry username for authentication
    required: false
  registry_password:
    description: Registry password for authentication
    required: false
outputs:
  version:
    description: The version tag created for the manifest
    value: ${{ steps.meta.outputs.version }}
  tags:
    description: All tags created for the manifest
    value: ${{ steps.meta.outputs.tags }}
runs:
  using: composite
  steps:
    - name: Download digests
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: forgejo/download-artifact@v4
      with:
        path: /tmp/digests
        pattern: ${{ inputs.digest_pattern }}
        merge-multiple: true
    - name: Login to builtin registry
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: docker/login-action@v3
      with:
        registry: ${{ env.BUILTIN_REGISTRY }}
        username: ${{ inputs.registry_user }}
        password: ${{ inputs.registry_password }}
    - name: Set up Docker Buildx
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: docker/setup-buildx-action@v3
      with:
        # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
        driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
        endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
    - name: Extract metadata (tags) for Docker
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      id: meta
      uses: docker/metadata-action@v5
      with:
        flavor: |
          suffix=${{ inputs.tag_suffix }},onlatest=true
        tags: |
          type=semver,pattern={{version}},prefix=v
          type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
          type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
          type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},
          type=ref,event=pr
          type=sha,format=short
          type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }},priority=1100
        images: ${{ inputs.images }}
      # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
      env:
        DOCKER_METADATA_ANNOTATIONS_LEVELS: index
    - name: Create manifest list and push
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      working-directory: /tmp/digests
      shell: bash
      env:
        IMAGES: ${{ inputs.images }}
      run: |
        set -o xtrace
        IFS=$'\n'
        IMAGES_LIST=($IMAGES)
        ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
        TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
        for REPO in "${IMAGES_LIST[@]}"; do
          docker buildx imagetools create \
            $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
            $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
            $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
        done
    - name: Inspect image
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      shell: bash
      env:
        IMAGES: ${{ inputs.images }}
      run: |
        set -o xtrace
        IMAGES_LIST=($IMAGES)
        for REPO in "${IMAGES_LIST[@]}"; do
          docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
        done


@@ -1,27 +0,0 @@
name: prefligit
description: |
  Runs prefligit, pre-commit reimplemented in Rust.
inputs:
  extra_args:
    description: options to pass to pre-commit run
    required: false
    default: '--all-files'
runs:
  using: composite
  steps:
    - name: Install uv
      uses: https://github.com/astral-sh/setup-uv@v6
      with:
        enable-cache: true
        ignore-nothing-to-cache: true
    - name: Install Prefligit
      shell: bash
      run: |
        curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
    - uses: actions/cache@v3
      with:
        path: ~/.cache/prefligit
        key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
    - run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
      shell: bash


@@ -0,0 +1,169 @@
name: prepare-docker-build
description: |
Prepare the Docker build environment for Continuwuity builds.
Sets up Rust toolchain, Docker Buildx, caching, and extracts metadata for Docker builds.
inputs:
platform:
description: Target platform (e.g. linux/amd64, linux/arm64)
required: true
slug:
description: Platform slug for artifact naming (e.g. linux-amd64, linux-arm64)
required: true
target_cpu:
description: Target CPU architecture (e.g. haswell, empty for base)
required: false
default: ""
profile:
description: Cargo build profile (release or release-max-perf)
required: true
images:
description: Container registry images (newline-separated)
required: true
registry_user:
description: Registry username for authentication
required: false
registry_password:
description: Registry password for authentication
required: false
outputs:
cpu_suffix:
description: CPU suffix for artifact naming
value: ${{ steps.cpu-suffix.outputs.suffix }}
metadata_labels:
description: Docker labels for the image
value: ${{ steps.meta.outputs.labels }}
metadata_annotations:
description: Docker annotations for the image
value: ${{ steps.meta.outputs.annotations }}
runs:
using: composite
steps:
- name: Set CPU suffix variable
id: cpu-suffix
shell: bash
run: |
if [[ -n "${{ inputs.target_cpu }}" ]]; then
echo "suffix=-${{ inputs.target_cpu }}" >> $GITHUB_OUTPUT
echo "CPU_SUFFIX=-${{ inputs.target_cpu }}" >> $GITHUB_ENV
else
echo "suffix=" >> $GITHUB_OUTPUT
echo "CPU_SUFFIX=" >> $GITHUB_ENV
fi
- name: Echo matrix configuration
shell: bash
run: |
echo "Platform: ${{ inputs.platform }}"
echo "Slug: ${{ inputs.slug }}"
echo "Target CPU: ${{ inputs.target_cpu }}"
echo "Profile: ${{ inputs.profile }}"
- name: Install rust
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: rust-toolchain
uses: ./.forgejo/actions/rust-toolchain
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Set up QEMU
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: docker/setup-qemu-action@v3
- name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v3
with:
registry: ${{ env.BUILTIN_REGISTRY }}
username: ${{ inputs.registry_user }}
password: ${{ inputs.registry_password }}
- name: Extract metadata (labels, annotations) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ inputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
- name: Get short git commit SHA
id: sha
shell: bash
run: |
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
echo "Short SHA: $calculatedSha"
- name: Get Git commit timestamps
shell: bash
run: |
timestamp=$(git log -1 --pretty=%ct)
echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
echo "Commit timestamp: $timestamp"
- uses: ./.forgejo/actions/timelord
id: timelord
- name: Cache Rust registry
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: actions/cache@v3
with:
path: |
.cargo/git
.cargo/git/checkouts
.cargo/registry
.cargo/registry/src
key: continuwuity-rust-registry-image-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-cargo-target
uses: actions/cache@v3
with:
path: |
cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}
key: continuwuity-cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ steps.rust-toolchain.outputs.rustc_version }}
- name: Cache apt cache
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt
uses: actions/cache@v3
with:
path: |
var-cache-apt-${{ inputs.slug }}
key: continuwuity-var-cache-apt-${{ inputs.slug }}
- name: Cache apt lib
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt-lib
uses: actions/cache@v3
with:
path: |
var-lib-apt-${{ inputs.slug }}
key: continuwuity-var-lib-apt-${{ inputs.slug }}
- name: inject cache into docker
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
with:
cache-map: |
{
".cargo/registry": "/usr/local/cargo/registry",
".cargo/git/db": "/usr/local/cargo/git/db",
"cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}": {
"target": "/app/target",
"id": "cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}"
},
"var-cache-apt-${{ inputs.slug }}": "/var/cache/apt",
"var-lib-apt-${{ inputs.slug }}": "/var/lib/apt",
"${{ steps.timelord.outputs.database-path }}":"/timelord"
}
skip-extraction: ${{ steps.cache-cargo-target.outputs.cache-hit }}
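
Sketch of how the cache identifiers above compose, with hypothetical inputs (target_cpu=haswell, slug=linux-amd64, profile=release-max-perf):

CPU_SUFFIX="-haswell"
echo "cargo-target${CPU_SUFFIX}-linux-amd64-release-max-perf"
# -> cargo-target-haswell-linux-amd64-release-max-perf
# The same string names the host directory, the cache key prefix, and the
# BuildKit cache mount id, so every platform/CPU/profile combination gets
# an isolated target directory.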

@@ -40,7 +40,7 @@ runs:
!~/.rustup/tmp
!~/.rustup/downloads
# Requires repo to be cloned if toolchain is not specified
key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
key: continuwuity-${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
- name: Install Rust toolchain
if: steps.rustup-version.outputs.version == ''
shell: bash

@@ -2,20 +2,14 @@ name: sccache
description: |
Install sccache for caching builds in GitHub Actions.
inputs:
token:
description: 'A Github PAT'
required: false
runs:
using: composite
steps:
- name: Install sccache
uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.token }}
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Configure sccache
uses: https://github.com/actions/github-script@v7
uses: https://github.com/actions/github-script@v8
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');

@@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
Set up LLVM toolchain with APT package management and smart caching.
Supports cross-compilation architectures and additional package installation.
Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib
inputs:
dpkg-arch:
description: 'Debian architecture for cross-compilation (e.g. arm64)'
required: false
default: ''
extra-packages:
description: 'Additional APT packages to install (space-separated)'
required: false
default: ''
llvm-version:
description: 'LLVM version to install'
required: false
default: '20'
outputs:
llvm-version:
description: 'Installed LLVM version'
value: ${{ steps.configure.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: https://git.tomfos.tr/actions/detect-versions@v1
- name: Configure cross-compilation architecture
if: inputs.dpkg-arch != ''
shell: bash
run: |
echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}
# Restrict default sources to amd64
sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list
# Add ports sources for foreign architecture
sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
EOF
echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"
- name: Start LLVM cache group
shell: bash
run: echo "::group::📦 Restoring LLVM cache"
- name: Check for LLVM cache
id: cache
uses: actions/cache@v4
with:
path: |
/usr/bin/clang-*
/usr/bin/clang++-*
/usr/bin/lld-*
/usr/bin/llvm-*
/usr/lib/llvm-*/
/usr/lib/x86_64-linux-gnu/libLLVM*.so*
/usr/lib/x86_64-linux-gnu/libclang*.so*
/etc/apt/sources.list.d/archive_uri-*
/etc/apt/trusted.gpg.d/apt.llvm.org.asc
key: continuwuity-llvm-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-v${{ inputs.llvm-version }}-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
- name: End LLVM cache group
shell: bash
run: echo "::endgroup::"
- name: Check and install LLVM if needed
id: llvm-setup
shell: bash
run: |
echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."
# Check both binaries and libraries exist
if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
[ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
[ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
echo "needs-install=false" >> $GITHUB_OUTPUT
else
echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
echo "::endgroup::"
if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
exit 1
fi
echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
echo "needs-install=true" >> $GITHUB_OUTPUT
fi
- name: Prepare for additional packages
if: inputs.extra-packages != ''
shell: bash
run: |
# Update APT if LLVM was cached (installer script already does apt-get update)
if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
sudo apt-get update
echo "::endgroup::"
fi
echo "::group::📦 Installing additional packages"
- name: Install additional packages
if: inputs.extra-packages != ''
uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
with:
packages: ${{ inputs.extra-packages }}
version: 1.0
- name: End package installation group
if: inputs.extra-packages != ''
shell: bash
run: echo "::endgroup::"
- name: Configure LLVM environment
id: configure
shell: bash
run: |
echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"
# Create symlinks
sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
echo " ✓ Created symlinks"
# Setup library paths
LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
if [ -d "$LLVM_LIB_PATH" ]; then
echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
sudo ldconfig
echo " ✓ Configured library paths"
else
# Fallback to standard library location
if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
echo " ✓ Using fallback library path"
fi
fi
# Set output
echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
echo "::endgroup::"
echo "✅ LLVM ready: $(clang --version | head -1)"

@@ -0,0 +1,239 @@
name: setup-rust
description: |
Set up Rust toolchain with sccache for compilation caching.
Respects rust-toolchain.toml by default or accepts explicit version override.
inputs:
cache-key-suffix:
description: 'Optional suffix for cache keys (e.g. platform identifier)'
required: false
default: ''
rust-components:
description: 'Additional Rust components to install (space-separated)'
required: false
default: ''
rust-target:
description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
required: false
default: ''
rust-version:
description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
required: false
default: '1.87.0'
sccache-cache-limit:
description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
required: false
default: '2G'
github-token:
description: 'GitHub token for downloading sccache from GitHub releases'
required: false
default: ''
outputs:
rust-version:
description: 'Installed Rust version'
value: ${{ steps.rust-setup.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: https://git.tomfos.tr/actions/detect-versions@v1
- name: Configure Cargo environment
shell: bash
run: |
# Use workspace-relative paths for better control and consistency
echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV
# Limit binstall resolution timeout to avoid GitHub rate limit delays
echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV
# Ensure directories exist for first run
mkdir -p "${{ github.workspace }}/.cargo"
mkdir -p "${{ github.workspace }}/.sccache"
mkdir -p "${{ github.workspace }}/target"
mkdir -p "${{ github.workspace }}/.rustup"
- name: Start cache restore group
shell: bash
run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
- name: Cache Cargo registry and git
id: registry-cache
uses: actions/cache@v4
with:
path: |
.cargo/registry/index
.cargo/registry/cache
.cargo/git/db
# Registry cache saved per workflow, restored from any workflow's cache
# Each workflow maintains its own registry that accumulates its needed crates
key: continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ github.workflow }}
restore-keys: |
continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-
- name: Cache toolchain binaries
id: toolchain-cache
uses: actions/cache@v4
with:
path: |
.cargo/bin
.rustup/toolchains
.rustup/update-hashes
# Shared toolchain cache across all Rust versions
key: continuwuity-toolchain-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Cache dependencies
id: deps-cache
uses: actions/cache@v4
with:
path: |
target/**/.fingerprint
target/**/deps
target/**/*.d
target/**/.cargo-lock
target/**/CACHEDIR.TAG
target/**/.rustc_info.json
/timelord/
# Dependencies cache - based on Cargo.lock, survives source code changes
key: >-
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
restore-keys: |
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
- name: Cache incremental compilation
id: incremental-cache
uses: actions/cache@v4
with:
path: |
target/**/incremental
# Incremental cache - based on source code changes
key: >-
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
restore-keys: |
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
- name: End cache restore group
shell: bash
run: echo "::endgroup::"
- name: Setup Rust toolchain
shell: bash
run: |
# Install rustup if not already cached
if ! command -v rustup &> /dev/null; then
echo "::group::📦 Installing rustup"
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
source "$CARGO_HOME/env"
echo "::endgroup::"
else
echo "✅ rustup already available"
fi
# Setup the appropriate Rust version
if [[ -n "${{ inputs.rust-version }}" ]]; then
echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
# Set override first to prevent rust-toolchain.toml from auto-installing
rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
# Check if we need to install/update the toolchain
if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
rustup update ${{ inputs.rust-version }}
else
rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
fi
else
echo "::group::📦 Setting up Rust from rust-toolchain.toml"
rustup show
fi
echo "::endgroup::"
- name: Configure PATH and install tools
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
run: |
# Add .cargo/bin to PATH permanently for all subsequent steps
echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH
# For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
export PATH="${{ github.workspace }}/.cargo/bin:$PATH"
# Install cargo-binstall for fast binary installations
if command -v cargo-binstall &> /dev/null; then
echo "✅ cargo-binstall already available"
else
echo "::group::📦 Installing cargo-binstall"
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
echo "::endgroup::"
fi
if command -v prek &> /dev/null; then
echo "✅ prek already available"
else
echo "::group::📦 Installing prek"
# prek isn't regularly published to crates.io, so we use git source
cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
echo "::endgroup::"
fi
if command -v timelord &> /dev/null; then
echo "✅ timelord already available"
else
echo "::group::📦 Installing timelord"
cargo-binstall -y --no-symlinks timelord-cli
echo "::endgroup::"
fi
- name: Configure sccache environment
shell: bash
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
# Configure incremental compilation GC
# If we restored from old cache (partial hit), clean up aggressively
if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
echo "♻️ Partial cache hit - enabling cache cleanup"
echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
fi
- name: Install Rust components
if: inputs.rust-components != ''
shell: bash
run: |
echo "📦 Installing components: ${{ inputs.rust-components }}"
rustup component add ${{ inputs.rust-components }}
- name: Install Rust target
if: inputs.rust-target != ''
shell: bash
run: |
echo "📦 Installing target: ${{ inputs.rust-target }}"
rustup target add ${{ inputs.rust-target }}
- name: Output version and summary
id: rust-setup
shell: bash
run: |
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
echo "📋 Setup complete:"
echo " Rust: $(rustc --version)"
echo " Cargo: $(cargo --version)"
echo " prek: $(prek --version 2>/dev/null || echo 'installed')"
echo " timelord: $(timelord --version 2>/dev/null || echo 'installed')"

@@ -1,46 +1,120 @@
name: timelord
description: |
Use timelord to set file timestamps
Use timelord to set file timestamps with git-warp-time fallback for cache misses
inputs:
key:
description: |
The key to use for caching the timelord data.
This should be unique to the repository and the runner.
required: true
default: timelord-v0
required: false
default: ''
path:
description: |
The path to the directory to be timestamped.
This should be the root of the repository.
required: true
default: .
required: false
default: ''
outputs:
database-path:
description: Path to timelord database
value: '${{ env.TIMELORD_CACHE_PATH }}'
runs:
using: composite
steps:
- name: Cache timelord-cli installation
id: cache-timelord-bin
uses: actions/cache@v3
with:
path: ~/.cargo/bin/timelord
key: timelord-cli-v3.0.1
- name: Install timelord-cli
uses: https://github.com/cargo-bins/cargo-binstall@main
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
- run: cargo binstall timelord-cli@3.0.1
- name: Set defaults
shell: bash
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
run: |
echo "TIMELORD_KEY=${{ inputs.key || format('timelord-v1-{0}-{1}', github.repository, hashFiles('**/*.rs', '**/Cargo.toml', '**/Cargo.lock')) }}" >> $GITHUB_ENV
echo "TIMELORD_PATH=${{ inputs.path || '.' }}" >> $GITHUB_ENV
echo "TIMELORD_CACHE_PATH=$HOME/.cache/timelord" >> $GITHUB_ENV
echo "PATH=$HOME/.cargo/bin:/usr/share/rust/.cargo/bin:$PATH" >> $GITHUB_ENV
- name: Load timelord files
uses: actions/cache/restore@v3
- name: Restore binary cache
id: binary-cache
uses: actions/cache/restore@v4
with:
path: /timelord/
key: ${{ inputs.key }}
- name: Run timelord to set timestamps
path: |
/usr/share/rust/.cargo/bin
~/.cargo/bin
key: continuwuity-timelord-binaries
- name: Check if binaries need installation
shell: bash
run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
- name: Save timelord
uses: actions/cache/save@v3
id: check-binaries
run: |
NEED_INSTALL=false
# Ensure ~/.cargo/bin exists
mkdir -p ~/.cargo/bin
# Check and move timelord if needed
if [ -f /usr/share/rust/.cargo/bin/timelord ] && [ ! -f ~/.cargo/bin/timelord ]; then
echo "Moving timelord from /usr/share/rust/.cargo/bin to ~/.cargo/bin"
mv /usr/share/rust/.cargo/bin/timelord ~/.cargo/bin/
fi
if [ ! -f ~/.cargo/bin/timelord ]; then
echo "timelord-cli not found, needs installation"
NEED_INSTALL=true
fi
# Check and move git-warp-time if needed
if [ -f /usr/share/rust/.cargo/bin/git-warp-time ] && [ ! -f ~/.cargo/bin/git-warp-time ]; then
echo "Moving git-warp-time from /usr/share/rust/.cargo/bin to ~/.cargo/bin"
mv /usr/share/rust/.cargo/bin/git-warp-time ~/.cargo/bin/
fi
if [ ! -f ~/.cargo/bin/git-warp-time ]; then
echo "git-warp-time not found, needs installation"
NEED_INSTALL=true
fi
echo "need-install=$NEED_INSTALL" >> $GITHUB_OUTPUT
- name: Install timelord-cli and git-warp-time
if: steps.check-binaries.outputs.need-install == 'true'
uses: https://github.com/taiki-e/install-action@v2
with:
path: /timelord/
key: ${{ inputs.key }}
tool: git-warp-time,timelord-cli@3.0.1
- name: Save binary cache
if: steps.check-binaries.outputs.need-install == 'true'
uses: actions/cache/save@v4
with:
path: |
/usr/share/rust/.cargo/bin
~/.cargo/bin
key: continuwuity-timelord-binaries
- name: Restore timelord cache with fallbacks
id: timelord-restore
uses: actions/cache/restore@v4
with:
path: ${{ env.TIMELORD_CACHE_PATH }}
key: ${{ env.TIMELORD_KEY }}
restore-keys: |
continuwuity-timelord-${{ github.repository }}-
- name: Initialize timestamps on complete cache miss
if: steps.timelord-restore.outputs.cache-hit != 'true'
shell: bash
run: |
echo "Complete timelord cache miss - running git-warp-time"
git fetch --unshallow
if [ "${{ env.TIMELORD_PATH }}" = "." ]; then
git-warp-time --quiet
else
git-warp-time --quiet ${{ env.TIMELORD_PATH }}
fi
echo "Git timestamps restored"
- name: Run timelord sync
shell: bash
run: |
mkdir -p ${{ env.TIMELORD_CACHE_PATH }}
timelord sync --source-dir ${{ env.TIMELORD_PATH }} --cache-dir ${{ env.TIMELORD_CACHE_PATH }}
- name: Save updated timelord cache immediately
uses: actions/cache/save@v4
with:
path: ${{ env.TIMELORD_CACHE_PATH }}
key: ${{ env.TIMELORD_KEY }}
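
Condensed, the rewritten action implements this strategy (paths hypothetical):

# 1. Restore cached binaries; install timelord-cli/git-warp-time only on a miss.
# 2. Restore the timestamp database; on a complete miss, derive mtimes from git:
git fetch --unshallow    # full history is needed for per-file commit times
git-warp-time --quiet    # set each file's mtime to its last commit
# 3. Sync and immediately persist the database for the next run:
timelord sync --source-dir . --cache-dir "$HOME/.cache/timelord"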

@@ -0,0 +1,70 @@
name: upload-docker-artifacts
description: |
Upload Docker build artifacts including binary and digest files.
Handles artifact naming and conditional digest uploads for registry publishing.
inputs:
slug:
description: Platform slug for artifact naming (e.g. linux-amd64, linux-arm64)
required: true
cpu_suffix:
description: CPU suffix for artifact naming (e.g. -haswell)
required: false
default: ""
artifact_suffix:
description: Suffix for binary artifacts (e.g. -maxperf)
required: false
default: ""
digest_suffix:
description: Suffix for digest artifacts (e.g. -maxperf)
required: false
default: ""
digest:
description: The digest of the built Docker image
required: true
outputs:
binary_artifact_name:
description: The name of the uploaded binary artifact
value: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
runs:
using: composite
steps:
- name: Export digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
shell: bash
run: |
mkdir -p /tmp/digests
digest="${{ inputs.digest }}"
echo "🔍 Build step digest output: '$digest'"
if [[ -z "$digest" ]]; then
echo "❌ ERROR: No digest found from build step"
exit 1
fi
digest_file="/tmp/digests/${digest#sha256:}"
echo "📁 Creating digest file: $digest_file"
touch "$digest_file"
echo "✅ Digest file created successfully"
echo "📋 Contents of /tmp/digests:"
ls -la /tmp/digests/
- name: Rename extracted binary
shell: bash
run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
- name: Upload binary artifact
uses: forgejo/upload-artifact@v4
with:
name: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
path: /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
if-no-files-found: error
- name: Upload digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/upload-artifact@v4
with:
name: digests${{ inputs.digest_suffix }}-${{ inputs.slug }}${{ inputs.cpu_suffix }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 5
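
The digest files written here are consumed by the manifest job, which rebuilds full image references from the bare file names. Roughly (digest hypothetical):

digest="sha256:0a1b2c..."               # from the build step output
touch "/tmp/digests/${digest#sha256:}"  # creates the file 0a1b2c...
# later: printf "$REPO@sha256:%s\n" 0a1b2c...  ->  $REPO@sha256:0a1b2c...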

@@ -40,6 +40,12 @@ creds:
- registry: registry.gitlab.com
user: "{{env \"GITLAB_USERNAME\"}}"
pass: "{{env \"GITLAB_TOKEN\"}}"
- registry: git.nexy7574.co.uk
user: "{{env \"N7574_GIT_USERNAME\"}}"
pass: "{{env \"N7574_GIT_TOKEN\"}}"
- registry: ghcr.io
user: "{{env \"GH_PACKAGES_USER\"}}"
pass: "{{env \"GH_PACKAGES_TOKEN\"}}"
# Global defaults
defaults:
@@ -53,3 +59,11 @@ sync:
target: registry.gitlab.com/continuwuity/continuwuity
type: repository
<<: *tags-main
- source: *source
target: git.nexy7574.co.uk/mirrored/continuwuity
type: repository
<<: *tags-releases
- source: *source
target: ghcr.io/continuwuity/continuwuity
type: repository
<<: *tags-main

@@ -0,0 +1,148 @@
name: Build / Debian DEB
concurrency:
group: "build-debian-${{ forge.ref }}"
cancel-in-progress: true
on:
push:
tags:
- "v*.*.*"
workflow_dispatch:
schedule:
- cron: '30 0 * * *'
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
container: ["ubuntu-latest", "ubuntu-previous", "debian-latest", "debian-oldstable"]
container:
image: "ghcr.io/tcpipuk/act-runner:${{ matrix.container }}"
steps:
- name: Get Debian version
id: debian-version
run: |
VERSION=$(cat /etc/debian_version)
DISTRIBUTION=$(lsb_release -sc 2>/dev/null)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
with:
fetch-depth: 0
- name: Cache Cargo registry
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
key: cargo-debian-${{ steps.debian-version.outputs.distribution }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
cargo-debian-${{ steps.debian-version.outputs.distribution }}-
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Configure sccache environment
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Get package version and component
id: package-meta
run: |
BASE_VERSION=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.name == \"conduwuit\").version" | sed 's/[^a-zA-Z0-9.+]/~/g')
# VERSION is the package version, COMPONENT is used in
# apt's repository config like a git repo branch
if [[ "${{ forge.ref }}" == "refs/tags/"* ]]; then
# Use the "stable" component for tagged releases
COMPONENT="stable"
VERSION=$BASE_VERSION
else
# Use the "dev" component for development builds
SHA=$(echo "${{ forge.sha }}" | cut -c1-7)
DATE=$(date +%Y%m%d)
if [ "${{ forge.ref_name }}" = "main" ]; then
COMPONENT="dev"
else
# Use the sanitized ref name as the component for feature branches
COMPONENT="dev-$(echo '${{ forge.ref_name }}' | sed 's/[^a-zA-Z0-9.+]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)"
fi
CLEAN_COMPONENT=$(echo $COMPONENT | sed 's/[^a-zA-Z0-9.+]/~/g')
VERSION="$BASE_VERSION~git$DATE.$SHA-$CLEAN_COMPONENT"
fi
echo "component=$COMPONENT" >> $GITHUB_OUTPUT
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Component: $COMPONENT"
echo "Version: $VERSION"
- name: Install cargo-deb
run: |
if command -v cargo-deb &> /dev/null; then
echo "cargo-deb already available"
else
echo "Installing cargo-deb"
cargo-binstall -y --no-symlinks cargo-deb
fi
- name: Install build dependencies
run: |
apt-get update -y
# Build dependencies for rocksdb
apt-get install -y clang liburing-dev
- name: Run cargo-deb
id: cargo-deb
run: |
DEB_PATH=$(cargo deb --deb-version ${{ steps.package-meta.outputs.version }})
echo "path=$DEB_PATH" >> $GITHUB_OUTPUT
- name: Test deb installation
run: |
echo "Installing: ${{ steps.cargo-deb.outputs.path }}"
apt-get install -y ${{ steps.cargo-deb.outputs.path }}
dpkg -s continuwuity
[ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
[ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
- name: Upload deb artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity-${{ steps.debian-version.outputs.distribution }}
path: ${{ steps.cargo-deb.outputs.path }}
- name: Publish to Forgejo package registry
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' || forge.event_name == 'schedule' }}
run: |
OWNER="continuwuation"
DISTRIBUTION=${{ steps.debian-version.outputs.distribution }}
COMPONENT=${{ steps.package-meta.outputs.component }}
DEB=${{ steps.cargo-deb.outputs.path }}
echo "Publishing: $DEB in component $COMPONENT for distribution $DISTRIBUTION"
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
--upload-file "$DEB" \
"${{ forge.server_url }}/api/packages/$OWNER/debian/pool/$DISTRIBUTION/$COMPONENT/upload"

@@ -0,0 +1,389 @@
name: Build / Fedora RPM
concurrency:
group: "build-fedora-${{ github.ref }}"
cancel-in-progress: true
on:
push:
tags:
- "v*.*.*"
# paths:
# - 'pkg/fedora/**'
# - 'src/**'
# - 'Cargo.toml'
# - 'Cargo.lock'
# - '.forgejo/workflows/build-fedora.yml'
workflow_dispatch:
schedule:
- cron: '30 0 * * *'
jobs:
build:
runs-on: fedora-latest
steps:
- name: Detect Fedora version
id: fedora
run: |
VERSION=$(rpm -E %fedora)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Fedora version: $VERSION"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
with:
fetch-depth: 0
- name: Cache DNF packages
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
/var/cache/dnf
/var/cache/yum
key: dnf-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('pkg/fedora/continuwuity.spec.rpkg') }}-v1
restore-keys: |
dnf-fedora${{ steps.fedora.outputs.version }}-
- name: Cache Cargo registry
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
key: cargo-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
cargo-fedora${{ steps.fedora.outputs.version }}-
- name: Cache Rust build dependencies
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/rpmbuild/BUILD/*/target/release/deps
~/rpmbuild/BUILD/*/target/release/build
~/rpmbuild/BUILD/*/target/release/.fingerprint
~/rpmbuild/BUILD/*/target/release/incremental
key: rust-deps-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
rust-deps-fedora${{ steps.fedora.outputs.version }}-
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Configure sccache environment
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Install base RPM tools
run: |
dnf install -y --setopt=keepcache=1 \
fedora-packager \
python3-pip \
rpm-sign \
rpkg \
wget
- name: Setup build environment and build SRPM
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
git config --global user.email "ci@continuwuity.org"
git config --global user.name "Continuwuity"
rpmdev-setuptree
cd "$GITHUB_WORKSPACE"
# Determine release suffix and version based on ref type and branch
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
# Tags get clean version numbers for stable releases
RELEASE_SUFFIX=""
TAG_NAME="${{ github.ref_name }}"
# Extract version from tag (remove v prefix if present)
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
# Create spec file with tag version
sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
-e "s/^Release:.*$/Release: 1%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
elif [ "${{ github.ref_name }}" = "main" ]; then
# Main branch gets .dev suffix
RELEASE_SUFFIX=".dev"
# Replace the Release line to include our suffix
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
else
# Other branches get sanitized branch name as suffix
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/_/g' | cut -c1-20)
RELEASE_SUFFIX=".${SAFE_BRANCH}"
# Replace the Release line to include our suffix
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
fi
rpkg srpm --outdir "$HOME/rpmbuild/SRPMS"
ls -la $HOME/rpmbuild/SRPMS/
- name: Install build dependencies from SRPM
run: |
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
if [ -z "$SRPM" ]; then
echo "Error: No SRPM file found"
exit 1
fi
echo "Installing build dependencies from: $(basename $SRPM)"
dnf builddep -y "$SRPM"
- name: Build RPM from SRPM
run: |
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
if [ -z "$SRPM" ]; then
echo "Error: No SRPM file found"
exit 1
fi
echo "Building from SRPM: $SRPM"
rpmbuild --rebuild "$SRPM" \
--define "_topdir $HOME/rpmbuild" \
--define "_sourcedir $GITHUB_WORKSPACE" \
--nocheck # Skip %check section to avoid test dependencies
- name: Test RPM installation
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find "$HOME/rpmbuild/RPMS" -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" | head -1)
if [ -z "$RPM" ]; then
echo "Error: No binary RPM file found"
exit 1
fi
echo "Testing installation of: $RPM"
# Dry run first
rpm -qpi "$RPM"
echo ""
rpm -qpl "$RPM"
# Actually install it
dnf install -y "$RPM"
# Verify installation
rpm -qa | grep continuwuity
# Check that the binary exists
[ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
[ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
- name: List built packages
run: |
echo "Binary RPMs:"
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec ls -la {} \;
echo ""
echo "Source RPMs:"
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec ls -la {} \;
- name: Collect artifacts
run: |
mkdir -p artifacts
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
cd artifacts
echo "Build Information:" > BUILD_INFO.txt
echo "==================" >> BUILD_INFO.txt
echo "Git commit: ${{ github.sha }}" >> BUILD_INFO.txt
echo "Git branch: ${{ github.ref_name }}" >> BUILD_INFO.txt
echo "Build date: $(date -u +%Y-%m-%d_%H:%M:%S_UTC)" >> BUILD_INFO.txt
echo "" >> BUILD_INFO.txt
echo "Package contents:" >> BUILD_INFO.txt
echo "-----------------" >> BUILD_INFO.txt
for rpm in *.rpm; do
echo "" >> BUILD_INFO.txt
echo "File: $rpm" >> BUILD_INFO.txt
rpm -qpi "$rpm" 2>/dev/null | grep -E "^(Name|Version|Release|Architecture|Size)" >> BUILD_INFO.txt
done
ls -la
- name: Upload binary RPM artifact
run: |
# Find the main binary RPM (exclude debug and source RPMs)
BIN_RPM=$(find artifacts -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" \
-type f)
mkdir -p upload-bin
cp $BIN_RPM upload-bin/
- name: Upload binary RPM
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity
path: upload-bin/
- name: Upload debug RPM artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity-debug
path: artifacts/*debuginfo*.rpm
- name: Publish to RPM Package Registry
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find artifacts -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" \
-type f | head -1)
if [ -z "$RPM" ]; then
echo "No binary RPM found to publish"
exit 0
fi
RPM_BASENAME=$(basename "$RPM")
echo "Publishing: $RPM_BASENAME"
# Determine the group based on ref type and branch
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
GROUP="stable"
# For tags, extract the tag name for version info
TAG_NAME="${{ github.ref_name }}"
elif [ "${{ github.ref_name }}" = "main" ]; then
GROUP="dev"
else
# Use sanitized branch name as group for feature branches
GROUP=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)
fi
PACKAGE_INFO=$(rpm -qpi "$RPM" 2>/dev/null)
PACKAGE_NAME=$(echo "$PACKAGE_INFO" | grep "^Name" | awk '{print $3}')
PACKAGE_VERSION=$(echo "$PACKAGE_INFO" | grep "^Version" | awk '{print $3}')
PACKAGE_RELEASE=$(echo "$PACKAGE_INFO" | grep "^Release" | awk '{print $3}')
PACKAGE_ARCH=$(echo "$PACKAGE_INFO" | grep "^Architecture" | awk '{print $2}')
# Full version includes release
FULL_VERSION="${PACKAGE_VERSION}-${PACKAGE_RELEASE}"
# Forgejo's RPM registry cannot overwrite existing packages, so we must delete first
# 404 is OK if package doesn't exist yet
echo "Removing any existing package: $PACKAGE_NAME-$FULL_VERSION.$PACKAGE_ARCH"
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/package/$PACKAGE_NAME/$FULL_VERSION/$PACKAGE_ARCH")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete package (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$RPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/upload?sign=true"
echo ""
echo "✅ Published binary RPM to: https://forgejo.ellis.link/continuwuation/-/packages/rpm/continuwuity/"
echo "Group: $GROUP"
# Upload debug RPMs to separate group
DEBUG_RPMS=$(find artifacts -name "*debuginfo*.rpm")
if [ -n "$DEBUG_RPMS" ]; then
echo ""
echo "Publishing debug RPMs to group: ${GROUP}-debug"
for DEBUG_RPM in $DEBUG_RPMS; do
echo "Publishing: $(basename "$DEBUG_RPM")"
DEBUG_INFO=$(rpm -qpi "$DEBUG_RPM" 2>/dev/null)
DEBUG_NAME=$(echo "$DEBUG_INFO" | grep "^Name" | awk '{print $3}')
DEBUG_VERSION=$(echo "$DEBUG_INFO" | grep "^Version" | awk '{print $3}')
DEBUG_RELEASE=$(echo "$DEBUG_INFO" | grep "^Release" | awk '{print $3}')
DEBUG_ARCH=$(echo "$DEBUG_INFO" | grep "^Architecture" | awk '{print $2}')
DEBUG_FULL_VERSION="${DEBUG_VERSION}-${DEBUG_RELEASE}"
# Must delete existing package first (Forgejo limitation)
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/package/$DEBUG_NAME/$DEBUG_FULL_VERSION/$DEBUG_ARCH")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete debug package (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$DEBUG_RPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/upload?sign=true"
done
echo "✅ Published debug RPMs to group: ${GROUP}-debug"
fi
# Also upload the SRPM to separate group
SRPM=$(find artifacts -name "*.src.rpm" | head -1)
if [ -n "$SRPM" ]; then
echo ""
echo "Publishing source RPM: $(basename "$SRPM")"
echo "Publishing to group: ${GROUP}-src"
SRPM_INFO=$(rpm -qpi "$SRPM" 2>/dev/null)
SRPM_NAME=$(echo "$SRPM_INFO" | grep "^Name" | awk '{print $3}')
SRPM_VERSION=$(echo "$SRPM_INFO" | grep "^Version" | awk '{print $3}')
SRPM_RELEASE=$(echo "$SRPM_INFO" | grep "^Release" | awk '{print $3}')
SRPM_FULL_VERSION="${SRPM_VERSION}-${SRPM_RELEASE}"
# Must delete existing SRPM first (Forgejo limitation)
echo "Removing any existing SRPM: $SRPM_NAME-$SRPM_FULL_VERSION.src"
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/package/$SRPM_NAME/$SRPM_FULL_VERSION/src")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete SRPM (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$SRPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/upload?sign=true"
echo "✅ Published source RPM to group: ${GROUP}-src"
fi
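
Every registry interaction above reduces to the same delete-then-upload pair. For a hypothetical continuwuity-0.5.0-1.fc42.x86_64.rpm published to group dev:

curl -s -w "\n%{http_code}" -X DELETE -H "Authorization: token $TOKEN" \
  "https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/package/continuwuity/0.5.0-1.fc42/x86_64"
# 204 = removed, 404 = nothing to remove; anything else aborts the publish
curl --fail-with-body -X PUT -H "Authorization: token $TOKEN" \
  -H "Content-Type: application/x-rpm" -T continuwuity-0.5.0-1.fc42.x86_64.rpm \
  "https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/upload?sign=true"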

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Sync repository
uses: https://github.com/actions/checkout@v4
uses: actions/checkout@v5
with:
persist-credentials: false
fetch-depth: 0
@@ -49,10 +49,21 @@ jobs:
cp ./docs/static/_headers ./public/_headers
echo "Copied .well-known files and _headers to ./public"
- name: Detect runner environment
id: runner-env
uses: https://git.tomfos.tr/actions/detect-versions@v1
- name: Setup Node.js
uses: https://github.com/actions/setup-node@v4
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v6
with:
node-version: 20
node-version: 22
- name: Cache npm dependencies
uses: actions/cache@v3
with:
path: ~/.npm
key: continuwuity-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}
- name: Install dependencies
run: npm install --save-dev wrangler@latest

@@ -4,6 +4,14 @@ on:
schedule:
- cron: "0 0 * * *"
workflow_dispatch:
pull_request:
paths:
- ".forgejo/workflows/element.yml"
push:
branches:
- main
paths:
- ".forgejo/workflows/element.yml"
concurrency:
group: "element-${{ github.ref }}"
@@ -16,7 +24,7 @@ jobs:
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v4
uses: https://github.com/actions/setup-node@v6
with:
node-version: "22"
@@ -101,7 +109,7 @@ jobs:
cat ./element-web/webapp/config.json
- name: 📤 Upload Artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: element-web
path: ./element-web/webapp/

@@ -11,7 +11,13 @@ on:
required: false
default: false
type: boolean
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/regsync/regsync.yml'
- '.forgejo/workflows/mirror-images.yml'
concurrency:
group: "mirror-images"
cancel-in-progress: true
@@ -24,12 +30,25 @@ jobs:
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
N7574_GIT_USERNAME: ${{ vars.N7574_GIT_USERNAME }}
N7574_GIT_TOKEN: ${{ secrets.N7574_GIT_TOKEN }}
GH_PACKAGES_USER: ${{ vars.GH_PACKAGES_USER }}
GH_PACKAGES_TOKEN: ${{ secrets.GH_PACKAGES_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
persist-credentials: false
# - uses: https://github.com/actions/create-github-app-token@v2
# id: app-token
# with:
# app-id: ${{ vars.GH_APP_ID }}
# private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
# github-api-url: https://api.github.com
# owner: continuwuity
# repositories: continuwuity
- name: Install regctl
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
with:

@@ -1,22 +0,0 @@
name: Checks / Prefligit
on:
push:
pull_request:
permissions:
contents: read
jobs:
prefligit:
runs-on: ubuntu-latest
env:
FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
TO_REF: ${{ github.sha }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.forgejo/actions/prefligit
with:
extra_args: --all-files --hook-stage manual

@@ -0,0 +1,83 @@
name: Checks / Prek
on:
pull_request:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
jobs:
fast-checks:
name: Pre-commit & Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run prek
run: |
prek run \
--all-files \
--hook-stage manual \
--show-diff-on-failure \
--color=always \
-v
- name: Check Rust formatting
run: |
cargo +nightly fmt --all -- --check && \
echo "✅ Formatting check passed" || \
exit 1
clippy-and-tests:
name: Clippy and Cargo Tests
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Setup LLVM
uses: ./.forgejo/actions/setup-llvm-with-apt
with:
extra-packages: liburing-dev liburing2
- name: Setup Rust with caching
uses: ./.forgejo/actions/setup-rust
with:
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run Clippy lints
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Run Cargo tests
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast
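
The same gate can be reproduced locally, assuming prek and a nightly toolchain are installed:

prek run --all-files --hook-stage manual --show-diff-on-failure --color=always -v
cargo +nightly fmt --all -- --check
cargo clippy --workspace --features full --locked --no-deps --profile test -- -D warnings
cargo test --workspace --features full --locked --profile test --all-targets --no-fail-fast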

@@ -3,62 +3,40 @@ concurrency:
group: "release-image-${{ github.ref }}"
on:
push:
pull_request:
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "debian/**"
- "docker/**"
- "pkg/**"
- "docs/**"
push:
branches:
- main
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "pkg/**"
- "docs/**"
tags:
- "v*.*.*"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
env:
BUILTIN_REGISTRY: forgejo.ellis.link
BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
IMAGE_PATH: forgejo.ellis.link/continuwuation/continuwuity
jobs:
define-variables:
runs-on: ubuntu-latest
outputs:
images: ${{ steps.var.outputs.images }}
images_list: ${{ steps.var.outputs.images_list }}
build_matrix: ${{ steps.var.outputs.build_matrix }}
steps:
- name: Setting variables
uses: https://github.com/actions/github-script@v7
id: var
with:
script: |
const githubRepo = '${{ github.repository }}'.toLowerCase()
const repoId = githubRepo.split('/')[1]
core.setOutput('github_repository', githubRepo)
const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
let images = []
if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
images.push(builtinImage)
}
core.setOutput('images', images.join("\n"))
core.setOutput('images_list', images.join(","))
const platforms = ['linux/amd64', 'linux/arm64']
core.setOutput('build_matrix', JSON.stringify({
platform: platforms,
target_cpu: ['base'],
include: platforms.map(platform => { return {
platform,
slug: platform.replace('/', '-')
}})
}))
build-image:
build-release:
name: "Build ${{ matrix.slug }} (release)"
runs-on: dind
needs: define-variables
permissions:
contents: read
packages: write
@@ -66,116 +44,28 @@ jobs:
id-token: write
strategy:
matrix:
{
"target_cpu": ["base"],
"profile": ["release"],
"include":
[
{ "platform": "linux/amd64", "slug": "linux-amd64" },
{ "platform": "linux/arm64", "slug": "linux-arm64" },
],
"platform": ["linux/amd64", "linux/arm64"],
}
include:
- platform: "linux/amd64"
slug: "linux-amd64"
- platform: "linux/arm64"
slug: "linux-arm64"
steps:
- name: Echo strategy
run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
- name: Echo matrix
run: echo '${{ toJSON(matrix) }}'
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Install rust
id: rust-toolchain
uses: ./.forgejo/actions/rust-toolchain
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry
uses: docker/login-action@v3
- name: Prepare Docker build environment
id: prepare
uses: ./.forgejo/actions/prepare-docker-build
with:
registry: ${{ env.BUILTIN_REGISTRY }}
username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
- name: Extract metadata (labels, annotations) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ${{needs.define-variables.outputs.images}}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
# It will not push images generated from a pull request
- name: Get short git commit SHA
id: sha
run: |
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
- name: Get Git commit timestamps
run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
- uses: ./.forgejo/actions/timelord
with:
key: timelord-v0
path: .
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
.cargo/git
.cargo/git/checkouts
.cargo/registry
.cargo/registry/src
key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
- name: Cache cargo target
id: cache-cargo-target
uses: actions/cache@v3
with:
path: |
cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
- name: Cache apt cache
id: cache-apt
uses: actions/cache@v3
with:
path: |
var-cache-apt-${{ matrix.slug }}
key: var-cache-apt-${{ matrix.slug }}
- name: Cache apt lib
id: cache-apt-lib
uses: actions/cache@v3
with:
path: |
var-lib-apt-${{ matrix.slug }}
key: var-lib-apt-${{ matrix.slug }}
- name: inject cache into docker
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
with:
cache-map: |
{
".cargo/registry": "/usr/local/cargo/registry",
".cargo/git/db": "/usr/local/cargo/git/db",
"cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}": {
"target": "/app/target",
"id": "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}"
},
"var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
"var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
}
skip-extraction: ${{ steps.cache.outputs.cache-hit }}
platform: ${{ matrix.platform }}
slug: ${{ matrix.slug }}
target_cpu: ""
profile: "release"
images: ${{ env.IMAGE_PATH }}
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Build and push Docker image by digest
id: build
uses: docker/build-push-action@v6
@@ -183,114 +73,134 @@ jobs:
context: .
file: "docker/Dockerfile"
build-args: |
GIT_COMMIT_HASH=${{ github.sha }})
GIT_COMMIT_HASH=${{ github.sha }}
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
GIT_REMOTE_URL=${{github.event.repository.html_url }}
GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
CARGO_INCREMENTAL=${{ env.BUILDKIT_ENDPOINT != '' && '1' || '0' }}
TARGET_CPU=
RUST_PROFILE=release
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
labels: ${{ steps.prepare.outputs.metadata_labels }}
annotations: ${{ steps.prepare.outputs.metadata_annotations }}
cache-from: type=gha
# cache-to: type=gha,mode=max
sbom: true
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
outputs: |
${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', env.IMAGE_PATH) || format('type=image,"name={0}",push=false', env.IMAGE_PATH) }}
type=local,dest=/tmp/binaries
env:
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
# For publishing multi-platform manifests
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Extract binary from container (image)
id: extract-binary-image
run: |
mkdir -p /tmp/binaries
digest="${{ steps.build.outputs.digest }}"
echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
- name: Extract binary from container (copy)
run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
- name: Extract binary from container (cleanup)
run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}
- name: Upload binary artifact
uses: forgejo/upload-artifact@v4
- name: Upload Docker artifacts
uses: ./.forgejo/actions/upload-docker-artifacts
with:
name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
if-no-files-found: error
slug: ${{ matrix.slug }}
cpu_suffix: ${{ steps.prepare.outputs.cpu_suffix }}
artifact_suffix: ""
digest_suffix: ""
digest: ${{ steps.build.outputs.digest }}
- name: Upload digest
uses: forgejo/upload-artifact@v4
with:
name: digests-${{ matrix.slug }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 5
merge:
merge-release:
name: "Create Multi-arch Release Manifest"
runs-on: dind
needs: [define-variables, build-image]
needs: build-release
steps:
- name: Download digests
uses: forgejo/download-artifact@v4
- name: Checkout repository
uses: actions/checkout@v5
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry
uses: docker/login-action@v3
persist-credentials: false
- name: Create multi-platform manifest
uses: ./.forgejo/actions/create-docker-manifest
with:
registry: ${{ env.BUILTIN_REGISTRY }}
username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
digest_pattern: "digests-linux-{amd64,arm64}"
tag_suffix: ""
images: ${{ env.IMAGE_PATH }}
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
build-maxperf:
name: "Build ${{ matrix.slug }} (max-perf)"
runs-on: dind
needs: build-release
permissions:
contents: read
packages: write
attestations: write
id-token: write
strategy:
matrix:
include:
- platform: "linux/amd64"
slug: "linux-amd64"
target_cpu: "haswell"
- platform: "linux/arm64"
slug: "linux-arm64"
target_cpu: ""
- name: Extract metadata (tags) for Docker
id: meta
uses: docker/metadata-action@v5
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
tags: |
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
type=ref,event=pr
type=sha,format=long
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
images: ${{ needs.define-variables.outputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
persist-credentials: false
- name: Prepare max-perf Docker build environment
id: prepare
uses: ./.forgejo/actions/prepare-docker-build
with:
platform: ${{ matrix.platform }}
slug: ${{ matrix.slug }}
target_cpu: ${{ matrix.target_cpu }}
profile: "release-max-perf"
images: ${{ env.IMAGE_PATH }}
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Build and push max-perf Docker image by digest
id: build
uses: docker/build-push-action@v6
with:
context: .
file: "docker/Dockerfile"
build-args: |
GIT_COMMIT_HASH=${{ github.sha }}
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
GIT_REMOTE_URL=${{ github.event.repository.html_url }}
GIT_REMOTE_COMMIT_URL=${{ github.event.head_commit.url }}
CARGO_INCREMENTAL=${{ env.BUILDKIT_ENDPOINT != '' && '1' || '0' }}
TARGET_CPU=${{ matrix.target_cpu }}
RUST_PROFILE=release-max-perf
platforms: ${{ matrix.platform }}
labels: ${{ steps.prepare.outputs.metadata_labels }}
annotations: ${{ steps.prepare.outputs.metadata_annotations }}
cache-from: type=gha
# cache-to: type=gha,mode=max
sbom: true
outputs: |
${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', env.IMAGE_PATH) || format('type=image,"name={0}",push=false', env.IMAGE_PATH) }}
type=local,dest=/tmp/binaries
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
- name: Upload max-perf Docker artifacts
uses: ./.forgejo/actions/upload-docker-artifacts
with:
slug: ${{ matrix.slug }}
cpu_suffix: ${{ steps.prepare.outputs.cpu_suffix }}
artifact_suffix: "-maxperf"
digest_suffix: "-maxperf"
digest: ${{ steps.build.outputs.digest }}
- name: Create manifest list and push
working-directory: /tmp/digests
env:
IMAGES: ${{ needs.define-variables.outputs.images }}
shell: bash
run: |
IFS=$'\n'
IMAGES_LIST=($IMAGES)
ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
for REPO in "${IMAGES_LIST[@]}"; do
docker buildx imagetools create \
$(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
$(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
$(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
done
- name: Inspect image
env:
IMAGES: ${{ needs.define-variables.outputs.images }}
shell: bash
run: |
IMAGES_LIST=($IMAGES)
for REPO in "${IMAGES_LIST[@]}"; do
docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
done
merge-maxperf:
name: "Create Max-Perf Manifest"
runs-on: dind
needs: build-maxperf
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Create max-perf manifest
uses: ./.forgejo/actions/create-docker-manifest
with:
digest_pattern: "digests-maxperf-linux-{amd64-haswell,arm64}"
tag_suffix: "-maxperf"
images: ${{ env.IMAGE_PATH }}
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
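
For reference, the extract-binary steps above (docker create / cp / rm) can be reproduced locally. A minimal sketch, assuming a placeholder image reference and digest:

```bash
# Pull the release binary out of a pushed image without running it.
# IMAGE is a placeholder; substitute a real registry path and digest.
IMAGE="forgejo.ellis.link/continuwuation/continuwuity@sha256:<digest>"
CID=$(docker create --platform linux/amd64 "$IMAGE")
docker cp "$CID:/sbin/conduwuit" ./conduwuit
docker rm "$CID"
```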


@@ -0,0 +1,132 @@
name: Maintenance / Renovate
enable-email-notifications: true
on:
schedule:
# Run at 5am UTC daily, avoiding late-night development hours
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
dryRun:
description: 'Dry run mode'
required: false
default: ''
type: choice
options:
- ''
- 'extract'
- 'lookup'
- 'full'
logLevel:
description: 'Log level'
required: false
default: 'info'
type: choice
options:
- 'debug'
- 'info'
- 'warning'
- 'critical'
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/workflows/renovate.yml'
- 'renovate.json'
jobs:
renovate:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:41.146.4@sha256:bb70194b7405faf10a6f279b60caa10403a440ba37d158c5a4ef0ae7b67a0f92
options: --tmpfs /tmp:exec
steps:
- name: Checkout
uses: actions/checkout@v5
with:
show-progress: false
- name: Print node heap
run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'
- name: Restore renovate repo cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: renovate-repo-cache-${{ github.run_id }}
restore-keys: |
renovate-repo-cache-
- name: Restore renovate package cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: renovate-package-cache-${{ github.run_id }}
restore-keys: |
renovate-package-cache-
- name: Restore renovate OSV cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/osv
key: renovate-osv-cache-${{ github.run_id }}
restore-keys: |
renovate-osv-cache-
- name: Self-hosted Renovate
run: renovate
env:
LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
RENOVATE_PLATFORM: forgejo
RENOVATE_ENDPOINT: ${{ github.server_url }}
RENOVATE_AUTODISCOVER: 'false'
RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
RENOVATE_GIT_TIMEOUT: 60000
RENOVATE_REQUIRE_CONFIG: 'required'
RENOVATE_ONBOARDING: 'false'
RENOVATE_INHERIT_CONFIG: 'true'
RENOVATE_GITHUB_TOKEN_WARN: 'false'
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO || secrets.GH_TOKEN }}
RENOVATE_REPOSITORY_CACHE: 'enabled'
RENOVATE_X_SQLITE_PACKAGE_CACHE: 'true'
OSV_OFFLINE_ROOT_DIR: /tmp/osv
- name: Save renovate repo cache
if: always()
uses: actions/cache/save@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: renovate-repo-cache-${{ github.run_id }}
- name: Save renovate package cache
if: always()
uses: actions/cache/save@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: renovate-package-cache-${{ github.run_id }}
- name: Save renovate OSV cache
if: always()
uses: actions/cache/save@v4
with:
path: |
/tmp/osv
key: renovate-osv-cache-${{ github.run_id }}
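
Since `RENOVATE_REQUIRE_CONFIG` is `required`, the run aborts unless the repository ships a Renovate config. A minimal sketch of one (the repository's actual `renovate.json` is more involved):

```bash
# Write a bare-bones renovate.json; hypothetical minimal content,
# not the repository's real configuration.
cat > renovate.json <<'EOF'
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["config:recommended"]
}
EOF
```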


@@ -1,144 +0,0 @@
name: Checks / Rust
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--all-features \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--all-features \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats
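
The removed jobs are straightforward to reproduce locally. A sketch, assuming `clang` and `liburing-dev` are already installed (the CI steps fetched them via apt):

```bash
# Same three checks the deleted workflow ran: formatting, lints, tests.
cargo +nightly fmt --all -- --check
cargo clippy --workspace --all-features --locked --no-deps --profile test -- -D warnings
cargo test --workspace --all-features --locked --profile test --all-targets --no-fail-fast
```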


@@ -0,0 +1,120 @@
name: Update flake hashes
on:
workflow_dispatch:
pull_request:
paths:
- "Cargo.lock"
- "Cargo.toml"
- "rust-toolchain.toml"
- "nix/**/*"
- ".forgejo/workflows/update-flake-hashes.yml"
jobs:
update-flake-hashes:
runs-on: ubuntu-latest
steps:
- uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
fetch-tags: false
fetch-single-branch: true
submodules: false
persist-credentials: false
- uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
with:
nix_path: nixpkgs=channel:nixos-unstable
# We can skip getting a toolchain hash if this was run as a dispatch with the intent
# to update just the rocksdb hash. If this was run as a dispatch and the toolchain
# files have changed, we still update them, as well as the rocksdb import.
- name: Detect changed files
id: changes
run: |
git fetch origin ${{ github.base_ref }} --depth=1 || true
if [ -n "${{ github.event.pull_request.base.sha }}" ]; then
base=${{ github.event.pull_request.base.sha }}
else
base=$(git rev-parse HEAD~1)
fi
echo "Base: $base"
echo "HEAD: $(git rev-parse HEAD)"
git diff --name-only $base HEAD > changed_files.txt
echo "detected changes in $(cat changed_files.txt)"
# Join files with commas
files=$(paste -sd, changed_files.txt)
echo "files=$files" >> $FORGEJO_OUTPUT
- name: Debug output
run: |
echo "State of output"
echo "Changed files: ${{ steps.changes.outputs.files }}"
- name: Get new toolchain hash
if: contains(steps.changes.outputs.files, 'Cargo.toml') || contains(steps.changes.outputs.files, 'Cargo.lock') || contains(steps.changes.outputs.files, 'rust-toolchain.toml')
run: |
# Set the current sha256 to a placeholder hash so `nix build` calculates a new one
awk '/fromToolchainFile *\{/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rust.nix > temp.nix
mv temp.nix nix/packages/rust.nix
# Build continuwuity and filter for the new hash
# We use `|| true` so the expected build failure doesn't stop the workflow
nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_toolchain_hash.txt) || true
# Put the new hash in place of the placeholder hash
new_hash=$(cat new_toolchain_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rust.nix
echo "New hash:"
awk -F'"' '/fromToolchainFile/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rust.nix
echo "Expected new hash:"
cat new_toolchain_hash.txt
rm new_toolchain_hash.txt
- name: Get new rocksdb hash
if: contains(steps.changes.outputs.files, '.nix') || contains(steps.changes.outputs.files, 'flake.lock')
run: |
# Set the current sha256 to a placeholder hash so `nix build` calculates a new one
awk '/repo = "rocksdb";/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rocksdb/package.nix > temp.nix
mv temp.nix nix/packages/rocksdb/package.nix
# Build continuwuity and filter for the new hash
# We use `|| true` so the expected build failure doesn't stop the workflow
nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_rocksdb_hash.txt) || true
# Put the new hash in place of the placeholder hash
new_hash=$(cat new_rocksdb_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rocksdb/package.nix
echo "New hash:"
awk -F'"' '/repo = "rocksdb";/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rocksdb/package.nix
echo "Expected new hash:"
cat new_rocksdb_hash.txt
rm new_rocksdb_hash.txt
- name: Show diff
run: git diff flake.nix nix
- name: Push changes
run: |
set -euo pipefail
if git diff --quiet --exit-code; then
echo "No changes to commit."
exit 0
fi
git config user.email "renovate@mail.ellis.link"
git config user.name "renovate"
REF="${{ github.head_ref }}"
git fetch origin "$REF"
git checkout "$REF"
git commit -a -m "chore(Nix): Updated flake hashes"
git push origin HEAD:refs/heads/"$REF"
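
The hash-refresh trick above generalises: plant `lib.fakeSha256`, let `nix build` fail and report the real hash, then splice it back in. A standalone sketch against a hypothetical `some-package.nix`:

```bash
# some-package.nix is a placeholder; the workflow's awk instead targets
# specific attributes in nix/packages/rust.nix and rocksdb/package.nix.
sed -i 's|sha256 = ".*";|sha256 = lib.fakeSha256;|' some-package.nix
nix build .#default 2>&1 | tee build.log || true
new_hash=$(grep 'got:' build.log | awk '{print $2}')
sed -i "s|lib.fakeSha256|\"$new_hash\"|" some-package.nix
```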

.github/FUNDING.yml

@@ -1,5 +1,4 @@
github: [JadedBlueEyes]
# Doesn't support an array, so we can only list nex
ko_fi: nexy7574
github: [JadedBlueEyes, nexy7574]
custom:
- https://ko-fi.com/nexy7574
- https://ko-fi.com/JadedBlueEyes


@@ -13,3 +13,4 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>


@@ -7,9 +7,9 @@ default_stages:
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
rev: v6.0.0
hooks:
- id: check-byte-order-marker
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-symlinks
- id: destroyed-symlinks
@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.26.0
rev: v1.39.0
hooks:
- id: typos
- id: typos


@@ -13,6 +13,9 @@ extend-ignore-re = [
"[0-9+][A-Za-z0-9+]{30,}[a-z0-9+]",
"\\$[A-Z0-9+][A-Za-z0-9+]{6,}[a-z0-9+]",
"\\b[a-z0-9+/=][A-Za-z0-9+/=]{7,}[a-z0-9+/=][A-Z]\\b",
# In the renovate config
".ontainer"
]
[default.extend-words]


@@ -7,5 +7,6 @@
"continuwuity",
"homeserver",
"homeservers"
]
],
"rust-analyzer.cargo.features": ["full"]
}


@@ -65,11 +65,11 @@ # Run tests
cargo test
# Check compilation
cargo check --workspace --all-features
cargo check --workspace --features full
# Run lints
cargo clippy --workspace --all-features
# Auto-fix: cargo clippy --workspace --all-features --fix --allow-staged;
cargo clippy --workspace --features full
# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;
# Format code (must use nightly)
cargo +nightly fmt

Cargo.lock

File diff suppressed because it is too large


@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0-rc.6"
version = "0.5.0-rc.8"
[workspace.metadata.crane]
name = "conduwuit"
@@ -45,18 +45,18 @@ version = "0.3"
features = ["ffi", "std", "union"]
[workspace.dependencies.const-str]
version = "0.6.2"
version = "0.7.0"
[workspace.dependencies.ctor]
version = "0.2.9"
version = "0.5.0"
[workspace.dependencies.cargo_toml]
version = "0.21"
version = "0.22"
default-features = false
features = ["features"]
[workspace.dependencies.toml]
version = "0.8.14"
version = "0.9.5"
default-features = false
features = ["parse"]
@@ -166,8 +166,8 @@ default-features = false
features = ["raw_value"]
# Used for appservice registration files
[workspace.dependencies.serde_yaml]
version = "0.9.34"
[workspace.dependencies.serde-saphyr]
version = "0.0.7"
# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -210,13 +210,13 @@ default-features = false
version = "0.1.41"
default-features = false
[workspace.dependencies.tracing-subscriber]
version = "0.3.19"
version = "0.3.20"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-journald]
version = "0.3.1"
[workspace.dependencies.tracing-core]
version = "0.1.33"
version = "0.1.34"
default-features = false
# for URL previews
@@ -286,7 +286,7 @@ features = [
]
[workspace.dependencies.hyper-util]
version = "0.1.11"
version = "=0.1.17"
default-features = false
features = [
"server-auto",
@@ -351,8 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "b753738047d1f443aca870896ef27ecaacf027da"
rev = "50b2a91b2ab8f9830eea80b9911e11234e0eac66"
features = [
"compat",
"rand",
@@ -382,16 +381,18 @@ features = [
"unstable-msc4095",
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4155",
"unstable-msc4186",
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4210", # remove legacy mentions
"unstable-extensible-events",
"unstable-pdu",
"unstable-msc4155"
]
[workspace.dependencies.rust-rocksdb]
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
rev = "61d9d23872197e9ace4a477f2617d5c9f50ecb23"
default-features = false
features = [
"multi-threaded-cf",
@@ -411,25 +412,27 @@ default-features = false
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.21.0"
version = "0.31.0"
[workspace.dependencies.tracing-flame]
version = "0.2.0"
[workspace.dependencies.tracing-opentelemetry]
version = "0.22.0"
version = "0.32.0"
[workspace.dependencies.opentelemetry_sdk]
version = "0.21.2"
version = "0.31.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-jaeger]
version = "0.20.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.31.0"
features = ["http", "trace", "logs", "metrics"]
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.37.0"
version = "0.45.0"
default-features = false
features = [
"backtrace",
@@ -445,9 +448,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.37.0"
version = "0.45.0"
[workspace.dependencies.sentry-tower]
version = "0.37.0"
version = "0.45.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -473,10 +476,10 @@ default-features = false
features = ["use_std"]
[workspace.dependencies.console-subscriber]
version = "0.4"
version = "0.5"
[workspace.dependencies.nix]
version = "0.29.0"
version = "0.30.1"
default-features = false
features = ["resource"]
@@ -498,7 +501,7 @@ version = "0.4.3"
default-features = false
[workspace.dependencies.termimad]
version = "0.31.2"
version = "0.34.0"
default-features = false
[workspace.dependencies.checked_ops]
@@ -536,35 +539,31 @@ version = "0.2"
version = "0.2"
[workspace.dependencies.minicbor]
version = "0.26.3"
version = "2.1.1"
features = ["std"]
[workspace.dependencies.minicbor-serde]
version = "0.4.1"
version = "0.6.0"
features = ["std"]
[workspace.dependencies.maplit]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.12.0"
default-features = false
features = ["sync", "tls-rustls", "rustls-provider"]
[workspace.dependencies.resolv-conf]
version = "0.7.5"
#
# Patches
#
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
[patch.crates-io.tracing-subscriber]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-core]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-log]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch
@@ -588,13 +587,7 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da"
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
[patch.crates-io.hyper-util]
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
# Allows no-aaaa option in resolv.conf
# Use 1-indexed line numbers when displaying parse error messages
[patch.crates-io.resolv-conf]
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
rev = "56251316cc4127bcbf36e68ce5e2093f4d33e227"
rev = "5886d5292bf704c246206ad72d010d674a7b77d0"
#
# Our crates
@@ -759,25 +752,6 @@ incremental = true
[profile.dev.package.conduwuit_core]
inherits = "dev"
#rustflags = [
# '--cfg', 'conduwuit_mods',
# '-Ztime-passes',
# '-Zmir-opt-level=0',
# '-Ztls-model=initial-exec',
# '-Cprefer-dynamic=true',
# '-Zstaticlib-prefer-dynamic=true',
# '-Zstaticlib-allow-rdylib-deps=true',
# '-Zpacked-bundled-libs=false',
# '-Zplt=true',
# '-Clink-arg=-Wl,--as-needed',
# '-Clink-arg=-Wl,--allow-shlib-undefined',
# '-Clink-arg=-Wl,-z,lazy',
# '-Clink-arg=-Wl,-z,unique',
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]
[profile.dev.package.xtask-generate-commands]
inherits = "dev"
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [
@@ -867,7 +841,7 @@ unused-qualifications = "warn"
#unused-results = "warn" # TODO
## some sadness
elided_named_lifetimes = "allow" # TODO!
mismatched_lifetime_syntaxes = "allow" # TODO!
let_underscore_drop = "allow"
missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@@ -1006,3 +980,6 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }
needless_raw_string_hashes = "allow"
# TODO: Enable this lint & fix all instances
collapsible_if = "allow"


@@ -57,7 +57,7 @@ ### What are the project's goals?
### Can I try it out?
Check out the [documentation](introduction) for installation instructions.
Check out the [documentation](https://continuwuity.org) for installation instructions.
There are currently no open registration Continuwuity instances available.


@@ -1,83 +0,0 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
RequiresMountsFor=/var/lib/private/conduwuit
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
Type=notify-reload
ReloadSignal=SIGUSR1
TTYPath=/dev/tty25
DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no
TTYColumns=120
TTYRows=40
AmbientCapabilities=
CapabilityBoundingSet=
DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml
LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
ExecStart=/usr/bin/conduwuit
Restart=on-failure
RestartSec=5
TimeoutStopSec=4m
TimeoutStartSec=4m
StartLimitInterval=1m
StartLimitBurst=5
[Install]
WantedBy=multi-user.target


@@ -79,9 +79,11 @@
# This is the only directory where continuwuity will save its data,
# including media. Note: this was previously "/var/lib/matrix-conduit".
#
# YOU NEED TO EDIT THIS.
# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
# `systemd` service. The service file sets it to `/var/lib/conduwuit`
# using an environment variable and also grants write access.
#
# example: "/var/lib/continuwuity"
# example: "/var/lib/conduwuit"
#
#database_path =
@@ -589,13 +591,19 @@
#
#default_room_version = 11
# This item is undocumented. Please contribute documentation for it.
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
# Jaeger) that supports the OpenTelemetry Protocol.
#
#allow_jaeger = false
# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
# environment variable (defaults to http://localhost:4318).
#
#allow_otlp = false
# This item is undocumented. Please contribute documentation for it.
# Filter for OTLP tracing spans. This controls which spans are exported
# to the OTLP collector.
#
#jaeger_filter = "info"
#otlp_filter = "info"
# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
@@ -949,6 +957,21 @@
#
#rocksdb_bottommost_compression = true
# Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
#
# At present, only ZSTD compression is supported by RocksDB for WAL
# compression. Enabling this can reduce WAL size at the expense of some
# CPU usage during writes.
#
# The options are:
# - "none" = No compression
# - "zstd" = ZSTD compression
#
# For more information on WAL compression, see:
# https://github.com/facebook/rocksdb/wiki/WAL-Compression
#
#rocksdb_wal_compression = "zstd"
# Database recovery mode (for RocksDB WAL corruption).
#
# Use this option when the server reports corruption and refuses to start.
@@ -1489,6 +1512,19 @@
#
#block_non_admin_invites = false
# Enable or disable making requests to MSC4284 Policy Servers.
# It is recommended you keep this enabled unless you experience frequent
# connectivity issues, such as in a restricted networking environment.
#
#enable_msc4284_policy_servers = true
# Enable running locally generated events through configured MSC4284
# policy servers. You may wish to disable this if your server is
# single-user for a slight speed benefit in some rooms, but otherwise
# should leave it enabled.
#
#policy_server_check_own_events = true
# Allow admins to enter commands in rooms other than "#admins" (admin
# room) by prefixing your message with "\!admin" or "\\!admin" followed by
# a normal continuwuity admin command. The reply will be publicly visible
@@ -1696,6 +1732,10 @@
#
#config_reload_signal = true
# This item is undocumented. Please contribute documentation for it.
#
#ldap = false
[global.tls]
# Path to a valid TLS certificate file.
@@ -1774,3 +1814,91 @@
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.ldap]
# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false
# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false
# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""
# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""
# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In that case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of an LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""
# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""
# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"
# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"
# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"
# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""
# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""
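
Putting the options above together, a hypothetical assembled `[global.ldap]` section (every key comes from the documentation above; every value is an illustrative example, not a default):

```bash
# Append an example LDAP section to the config file.
cat >> conduwuit.toml <<'EOF'
[global.ldap]
enable = true
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/conduwuit/ldap-password"
filter = "(&(objectClass=person)(memberOf=matrix))"
uid_attribute = "uid"
EOF
```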

debian/postinst

@@ -1,44 +0,0 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
# Create the `conduwuit` user if it does not exist yet.
if ! getent passwd conduwuit > /dev/null ; then
echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
adduser --system --group --quiet \
--home "$CONDUWUIT_DATABASE_PATH" \
--disabled-login \
--shell "/usr/sbin/nologin" \
conduwuit
fi
# Create the database path if it does not exist yet and fix up ownership
# and permissions for the config.
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
# symlink the previous location for compatibility if it does not exist yet.
if ! test -L "/var/lib/matrix-conduit" ; then
ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
fi
chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#


@@ -48,11 +48,13 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.13.0
ENV BINSTALL_VERSION=1.15.10
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# renovate: datasource=crate depName=timelord-cli
ENV TIMELORD_VERSION=3.0.1
# Install unpackaged tools
RUN <<EOF
@@ -60,6 +62,7 @@ RUN <<EOF
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
cargo binstall --no-confirm timelord-cli --version $TIMELORD_VERSION
EOF
# Set up xx (cross-compilation scripts)
@@ -78,17 +81,20 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& rustup target add $(xx-cargo --print-target-triple)
&& xx-cargo --setup-target-triple
# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure incremental compilation based on build context
ARG CARGO_INCREMENTAL=0
RUN echo "CARGO_INCREMENTAL=${CARGO_INCREMENTAL}" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
@@ -109,16 +115,17 @@ RUN <<EOF
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU=
ARG TARGET_CPU
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
@@ -130,18 +137,23 @@ FROM toolchain AS builder
# Get source
COPY . .
# Restore timestamps from timelord cache if available
RUN --mount=type=cache,target=/timelord/ \
echo "Restoring timestamps from timelord cache"; \
timelord sync --source-dir /app --cache-dir /timelord;
ARG TARGETPLATFORM
# Verify environment configuration
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH=
ARG GIT_COMMIT_HASH_SHORT=
ARG GIT_REMOTE_URL=
ARG GIT_REMOTE_COMMIT_URL=
ARG CONDUWUIT_VERSION_EXTRA=
ARG CONTINUWUITY_VERSION_EXTRA=
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
@@ -154,7 +166,7 @@ ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace
@@ -169,8 +181,8 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY /out/sbin/$BINARY
done
EOF
@@ -196,32 +208,57 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
EOF
# Extract dynamically linked dependencies
RUN <<EOF
RUN <<'DEPS_EOF'
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
mkdir /out/libs /out/libs-root
# Process each binary
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
while read cmd; do eval "$cmd"; done
fi
done
EOF
# Show what will be copied to runtime
echo "=== Libraries being copied to runtime image:"
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
DEPS_EOF
FROM ubuntu:latest AS prepper
# Create layer structure
RUN mkdir -p /layer1/etc/ssl/certs \
/layer2/usr/lib \
/layer3/sbin /layer3/sbom
# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
COPY --from=builder /out/libs-root/ /layer1/
# Copy application libraries to layer2 (semi-stable)
COPY --from=builder /out/libs/ /layer2/usr/lib/
# Copy binaries and SBOM to layer3 (volatile)
COPY --from=builder /out/sbin/ /layer3/sbin/
COPY --from=builder /out/sbom/ /layer3/sbom/
# Fix permissions after copying
RUN chmod -R 755 /layer1 /layer2 /layer3
FROM scratch
WORKDIR /
# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy ultra-stable layer (SSL certs, system libraries)
COPY --from=prepper /layer1/ /
# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy semi-stable layer (application libraries)
COPY --from=prepper /layer2/ /
# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Copy volatile layer (binaries, SBOM)
COPY --from=prepper /layer3/ /
# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib

docker/musl.Dockerfile

@@ -0,0 +1,200 @@
# Why does this exist?
# Debian doesn't provide prebuilt musl packages
# rocksdb requires a prebuilt liburing, and linking fails if a GNU one is provided
ARG RUST_VERSION=1
ARG ALPINE_VERSION=3.22
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS toolchain
# Install repo tools and dependencies
RUN --mount=type=cache,target=/etc/apk/cache apk add \
build-base pkgconfig make jq bash \
curl git file \
llvm-dev clang clang-static lld
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.10
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# Install unpackaged tools
RUN <<EOF
set -o xtrace
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
EOF
# Set up xx (cross-compilation scripts)
COPY --from=xx / /
ARG TARGETPLATFORM
# Install libraries linked by the binary
RUN --mount=type=cache,target=/etc/apk/cache xx-apk add musl-dev gcc g++ liburing-dev
# Set up Rust toolchain
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& xx-cargo --setup-target-triple
# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
# Configure cc to use clang version
RUN <<EOF
set -o xtrace
echo "CC=clang" >> /etc/environment
echo "CXX=clang++" >> /etc/environment
EOF
# Cross-language LTO
RUN <<EOF
set -o xtrace
echo "CFLAGS=-flto" >> /etc/environment
echo "CXXFLAGS=-flto" >> /etc/environment
# Linker is set to target-compatible clang by xx
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
RUN mkdir /out
FROM toolchain AS builder
# Get source
COPY . .
ARG TARGETPLATFORM
# Verify environment configuration
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace
. /etc/environment
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".target_directory"))
mkdir /out/sbin
PACKAGE=conduwuit
xx-cargo build --locked --profile ${RUST_PROFILE} \
-p $PACKAGE --no-default-features --features bindgen-static,release_max_log_level,standard;
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF
# Generate Software Bill of Materials (SBOM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
bash <<'EOF'
set -o xtrace
mkdir /out/sbom
typeset -A PACKAGES
for BINARY in /out/sbin/*; do
BINARY_BASE=$(basename ${BINARY})
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
if [ -z "$package" ]; then
continue
fi
PACKAGES[$package]=1
done
for PACKAGE in $(echo ${!PACKAGES[@]}); do
echo $PACKAGE
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
done
EOF
# Extract dynamically linked dependencies
RUN <<EOF
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
done
EOF
FROM scratch
WORKDIR /
# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib
# Continuwuity default port
EXPOSE 8008
CMD ["/sbin/conduwuit"]
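
A sketch of building the new musl image locally; the tag is arbitrary and BuildKit (`docker buildx`) is assumed:

```bash
# Single-platform local build of the musl Dockerfile; --load imports the
# result into the local Docker image store.
docker buildx build \
  --file docker/musl.Dockerfile \
  --platform linux/amd64 \
  --build-arg RUST_PROFILE=release \
  --tag continuwuity:musl-local \
  --load .
```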


@@ -10,6 +10,7 @@ # Summary
- [Kubernetes](deploying/kubernetes.md)
- [Arch Linux](deploying/arch-linux.md)
- [Debian](deploying/debian.md)
- [Fedora](deploying/fedora.md)
- [FreeBSD](deploying/freebsd.md)
- [TURN](turn.md)
- [Appservices](appservices.md)


@@ -21,6 +21,7 @@ # Command-Line Help for `admin`
* [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms)
* [`admin users force-join-room`↴](#admin-users-force-join-room)
* [`admin users force-leave-room`↴](#admin-users-force-leave-room)
* [`admin users force-leave-remote-room`↴](#admin-users-force-leave-remote-room)
* [`admin users force-demote`↴](#admin-users-force-demote)
* [`admin users make-user-admin`↴](#admin-users-make-user-admin)
* [`admin users put-room-tag`↴](#admin-users-put-room-tag)
@@ -295,6 +296,7 @@ ###### **Subcommands:**
* `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in
* `force-join-room` — - Manually join a local user to a room
* `force-leave-room` — - Manually leave a local user from a room
* `force-leave-remote-room` — - Manually leave a remote room for a local user
* `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
* `make-user-admin` — - Grant server-admin privileges to a user
* `put-room-tag` — - Puts a room tag for the specified user and room ID
@@ -449,6 +451,19 @@ ###### **Arguments:**
## `admin users force-leave-remote-room`
- Manually leave a remote room for a local user
**Usage:** `admin users force-leave-remote-room <USER_ID> <ROOM_ID>`
###### **Arguments:**
* `<USER_ID>`
* `<ROOM_ID>`
## `admin users force-demote`
- Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
@@ -1063,7 +1078,10 @@ ###### **Subcommands:**
* `delete` — - Deletes a single media file from our database and on the filesystem via a single MXC URL or event ID (not redacted)
* `delete-list` — - Deletes a codeblock list of MXC URLs from our database and on the filesystem. This will always ignore errors
* `delete-past-remote-media` - Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
* `delete-past-remote-media` — Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* `delete-all-from-user` — - Deletes all the local media from a local user on our server. This will always ignore errors by default
* `delete-all-from-server` — - Deletes all remote media from the specified remote server. This will always ignore errors by default
* `get-file-info`
@@ -1095,13 +1113,25 @@ ## `admin media delete-list`
## `admin media delete-past-remote-media`
- Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* Examples:
* Delete all remote media older than a year:
`!admin media delete-past-remote-media -b 1y`
* Delete all remote and local media from 3 days ago, up until now:
`!admin media delete-past-remote-media -a 3d --yes-i-want-to-delete-local-media`
**Usage:** `admin media delete-past-remote-media [OPTIONS] <DURATION>`
###### **Arguments:**
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) within which to search
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) from now within which to search
###### **Options:**


@@ -9,24 +9,11 @@ ## Example configuration
</details>
## Debian systemd unit file
## systemd unit file
<details>
<summary>Debian systemd unit file</summary>
<summary>systemd unit file</summary>
```
{{#include ../../debian/conduwuit.service}}
{{#include ../../pkg/conduwuit.service}}
```
</details>
## Arch Linux systemd unit file
<details>
<summary>Arch Linux systemd unit file</summary>
```
{{#include ../../arch/conduwuit.service}}
```
</details>


@@ -1 +1 @@
{{#include ../../debian/README.md}}
{{#include ../../pkg/debian/README.md}}


@@ -12,6 +12,15 @@ services:
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
- "traefik.http.routers.continuwuity.tls=true"
- "traefik.http.routers.continuwuity.service=continuwuity"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# possibly, depending on your config:
# - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity


@@ -12,6 +12,14 @@ services:
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure"
- "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# Uncomment and adjust the following if you want to use middleware
# - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
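
Once Traefik picks up the labels above, both hostnames should answer. A quick smoke test, with the example domains as placeholders:

```bash
# Delegation endpoint served under the bare domain per the PathPrefix rule
curl -s https://example.com/.well-known/matrix/server
# Client API served under the matrix subdomain
curl -s https://matrix.example.com/_matrix/client/versions
```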

docs/deploying/fedora.md

@@ -0,0 +1,201 @@
# RPM Installation Guide
Continuwuity is available as RPM packages for Fedora, RHEL, and compatible distributions.
The RPM packaging files are maintained in the `fedora/` directory:
- `continuwuity.spec.rpkg` - RPM spec file using rpkg macros for building from git
- `continuwuity.service` - Systemd service file for the server
- `RPM-GPG-KEY-continuwuity.asc` - GPG public key for verifying signed packages
RPM packages built by CI are signed with our GPG key (Ed25519, ID: `5E0FF73F411AAFCA`).
```bash
# Import the signing key
sudo rpm --import https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc
# Verify a downloaded package
rpm --checksig continuwuity-*.rpm
```
## Installation methods
**Stable releases** (recommended)
```bash
# Add the repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuation.repo
sudo dnf install continuwuity
```
**Development builds** from main branch
```bash
# Add the dev repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuation.repo
sudo dnf install continuwuity
```
**Feature branch builds** (example: `tom/new-feature`)
```bash
# Branch names are sanitized (slashes become hyphens, lowercase only)
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/tom-new-feature/continuwuation.repo
sudo dnf install continuwuity
```
**Direct installation** without adding repository
```bash
# Latest stable release
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuity
# Latest development build
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuity
# Specific feature branch
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/branch-name/continuwuity
```
**Manual repository configuration** (alternative method)
```bash
cat << 'EOF' | sudo tee /etc/yum.repos.d/continuwuity.repo
[continuwuity]
name=Continuwuity - Matrix homeserver
baseurl=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable
enabled=1
gpgcheck=1
gpgkey=https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc
EOF
sudo dnf install continuwuity
```
## Package management
**Automatic updates** with DNF Automatic
```bash
# Install and configure
sudo dnf install dnf-automatic
sudo nano /etc/dnf/automatic.conf # Set: apply_updates = yes
sudo systemctl enable --now dnf-automatic.timer
```
**Manual updates**
```bash
# Check for updates
sudo dnf check-update continuwuity
# Update to latest version
sudo dnf update continuwuity
```
**Switching channels** (stable/dev/feature branches)
```bash
# List enabled repositories
dnf repolist | grep continuwuation
# Disable current repository
sudo dnf config-manager --set-disabled continuwuation-stable # or -dev, or branch name
# Enable desired repository
sudo dnf config-manager --set-enabled continuwuation-dev # or -stable, or branch name
# Update to the new channel's version
sudo dnf update continuwuity
```
**Verifying installation**
```bash
# Check installed version
rpm -q continuwuity
# View package information
rpm -qi continuwuity
# List installed files
rpm -ql continuwuity
# Verify package integrity
rpm -V continuwuity
```
## Service management and removal
**Systemd service commands**
```bash
# Start the service
sudo systemctl start conduwuit
# Enable on boot
sudo systemctl enable conduwuit
# Check status
sudo systemctl status conduwuit
# View logs
sudo journalctl -u conduwuit -f
```
**Uninstallation**
```bash
# Stop and disable the service
sudo systemctl stop conduwuit
sudo systemctl disable conduwuit
# Remove the package
sudo dnf remove continuwuity
# Remove the repository (optional)
sudo rm /etc/yum.repos.d/continuwuation-*.repo
```
## Troubleshooting
**GPG key errors**: Temporarily disable GPG checking
```bash
sudo dnf --nogpgcheck install continuwuity
```
**Repository metadata issues**: Clear and rebuild cache
```bash
sudo dnf clean all
sudo dnf makecache
```
**Finding specific versions**
```bash
# List all available versions
dnf --showduplicates list continuwuity
# Install a specific version
sudo dnf install continuwuity-<version>
```
## Building locally
Build the RPM locally using rpkg:
```bash
# Install dependencies
sudo dnf install rpkg rpm-build cargo-rpm-macros systemd-rpm-macros
# Clone the repository
git clone https://forgejo.ellis.link/continuwuation/continuwuity.git
cd continuwuity
# Build SRPM
rpkg srpm
# Build RPM
rpmbuild --rebuild *.src.rpm
```


@@ -44,7 +44,7 @@ ### Building with the Rust toolchain
- (On Linux) `pkg-config` on the compiling machine to allow finding `liburing`
- A C++ compiler and (on Linux) `libclang` for RocksDB
You can build Continuwuity using `cargo build --release --all-features`.
You can build Continuwuity using `cargo build --release`.
### Building with Nix


@@ -241,7 +241,7 @@ ## Documentation
### Code Comments
- Reference related documentation or parts of the specification
- When a task has multiple ways of being acheved, explain your reasoning for your decision
- When a task has multiple ways of being achieved, explain your reasoning for your decision
- Update comments when code changes
```rs


@@ -6,8 +6,8 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 2,
"message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions."
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
}
]
}

View File

@@ -83,7 +83,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--all-features \
--features full \
--no-deps \
--document-private-items \
--color always
@@ -96,7 +96,7 @@ script = """
direnv exec . \
cargo clippy \
--workspace \
--all-features \
--features full \
--locked \
--profile test \
--color=always \
@@ -114,7 +114,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--all-features \
--features full \
--color=always \
-- \
-D warnings

513
flake.lock generated
View File

@@ -1,95 +1,28 @@
{
"nodes": {
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nix-github-actions": "nix-github-actions",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1751403276,
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
"lastModified": 1761112158,
"narHash": "sha256-RIXu/7eyKpQHjsPuAUODO81I4ni8f+WYSb7K4mTG6+0=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "58f3aaec0e1776f4a900737be8cd7cb00972210d",
"type": "github"
},
"original": {
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"cachix": {
"inputs": {
"devenv": "devenv",
"flake-compat": "flake-compat_2",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "master",
"repo": "cachix",
"type": "github"
}
},
"cachix_2": {
"inputs": {
"devenv": [
"cachix",
"devenv"
],
"flake-compat": [
"cachix",
"devenv"
],
"git-hooks": [
"cachix",
"devenv"
],
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1744206633,
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
"owner": "cachix",
"repo": "cachix",
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1722960479,
"narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=",
"lastModified": 1760924934,
"narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=",
"owner": "ipetkov",
"repo": "crane",
"rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4",
"rev": "c6b4d5308293d0d04fcfeee92705017537cad02f",
"type": "github"
},
"original": {
@@ -98,53 +31,6 @@
"type": "github"
}
},
"crane_2": {
"locked": {
"lastModified": 1750266157,
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
"owner": "ipetkov",
"repo": "crane",
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix_2",
"flake-compat": [
"cachix",
"flake-compat"
],
"git-hooks": [
"cachix",
"git-hooks"
],
"nix": "nix",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748273445,
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
"owner": "cachix",
"repo": "devenv",
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
@@ -153,53 +39,20 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1751525020,
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
"lastModified": 1761115517,
"narHash": "sha256-Fev/ag/c3Fp3JBwHfup3lpA5FlNXfkoshnQ7dssBgJ0=",
"owner": "nix-community",
"repo": "fenix",
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
"rev": "320433651636186ea32b387cff05d6bbfa30cea7",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "main",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
@@ -218,17 +71,14 @@
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"attic",
"nixpkgs"
]
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"lastModified": 1760948891,
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
"type": "github"
},
"original": {
@@ -237,225 +87,13 @@
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"cachix",
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": [
"cachix",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1747372754,
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"cachix",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
"cachix",
"devenv"
],
"flake-parts": "flake-parts_2",
"libgit2": "libgit2",
"nixpkgs": "nixpkgs_3",
"nixpkgs-23-11": [
"cachix",
"devenv"
],
"nixpkgs-regression": [
"cachix",
"devenv"
],
"pre-commit-hooks": [
"cachix",
"devenv"
]
},
"locked": {
"lastModified": 1745930071,
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.24",
"repo": "nix",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1731533336,
"narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "nix-filter",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1729742964,
"narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "e04df33f62cdcf93d73e9a04142464753a16db67",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1726042813,
"narHash": "sha256-LnNKCCxnwgF+575y0pxUdlGZBO/ru1CtGHIqQVfvjlA=",
"lastModified": 1760878510,
"narHash": "sha256-K5Osef2qexezUfs0alLvZ7nQFTGS9DL2oTVsIXsqLgs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "159be5db480d1df880a0135ca0bfed84c2f88353",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1724316499,
"narHash": "sha256-Qb9MhKBUTCfWg/wqqaxt89Xfi6qTD3XpTzQ9eXi3JmE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "797f7dc49e0bc7fab4b57c021cdf68f595e47841",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"rev": "5e2a59a5b1a82f89f2c7e598302a9cacebb72a67",
"type": "github"
},
"original": {
@@ -465,92 +103,40 @@
"type": "github"
}
},
"nixpkgs_3": {
"nixpkgs-lib": {
"locked": {
"lastModified": 1717432640,
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
"lastModified": 1754788789,
"narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "a73b9c743612e4244d865a2fdee11865283c04e6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1748190013,
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_5": {
"locked": {
"lastModified": 1751498133,
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"ref": "v9.11.1",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"revCount": 13177,
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
},
"original": {
"ref": "v9.11.1",
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
}
},
"root": {
"inputs": {
"attic": "attic",
"cachix": "cachix",
"crane": "crane_2",
"advisory-db": "advisory-db",
"crane": "crane",
"fenix": "fenix",
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_5",
"rocksdb": "rocksdb"
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs",
"treefmt-nix": "treefmt-nix"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1751433876,
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
"lastModified": 1761077270,
"narHash": "sha256-O1uTuvI/rUlubJ8AXKyzh1WSWV3qCZX0huTFUvWLN4E=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
"rev": "39990a923c8bca38f5bd29dc4c96e20ee7808d5d",
"type": "github"
},
"original": {
@@ -560,18 +146,23 @@
"type": "github"
}
},
"systems": {
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"lastModified": 1760945191,
"narHash": "sha256-ZRVs8UqikBa4Ki3X4KCnMBtBW0ux1DaT35tgsnB1jM4=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "f56b1934f5f8fcab8deb5d38d42fd692632b47c2",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
}

365
flake.nix
View File

@@ -1,345 +1,46 @@
{
description = "A nix flake for the continuwuity project";
inputs = {
attic.url = "github:zhaofengli/attic?ref=main";
cachix.url = "github:cachix/cachix?ref=master";
crane = {
url = "github:ipetkov/crane?ref=master";
};
# basics
flake-parts.url = "github:hercules-ci/flake-parts";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# for rust via nix
crane.url = "github:ipetkov/crane";
fenix = {
url = "github:nix-community/fenix?ref=main";
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
# for vuln checks
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
treefmt-nix = {
url = "github:numtide/treefmt-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
# for default.nix
flake-compat = {
url = "github:edolstra/flake-compat?ref=master";
flake = false;
};
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = {
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
flake = false;
};
};
outputs =
inputs:
inputs.flake-utils.lib.eachDefaultSystem (
system:
let
pkgsHost = import inputs.nixpkgs {
inherit system;
};
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
};
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
"out"
"dev"
"man"
];
buildFlags = [ "library" ];
};
rocksdb =
(pkgs.rocksdb_9_10.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit (self) liburing;
}).overrideAttrs
(old: {
src = inputs.rocksdb;
version = "v9.11.1";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# We don't need to build rocksdb tests
"-DWITH_TESTS=1"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=1"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
] old.cmakeFlags
++ [
# No real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# We don't need trace tools
"-DWITH_TRACE_TOOLS=0"
# We don't need to build rocksdb tests
"-DWITH_TESTS=0"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting this so we don't have to revert it and make this nix exclusive
patches = [ ];
postPatch = ''
# Fix gcc-13 build failures due to missing <cstdint> and
# <system_error> includes, fixed upstream since 8.x
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
sed -e '1i #include <cstdint>' -i util/string_util.h
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
'';
});
});
scopeHost = mkScope pkgsHost;
mkCrossScope =
crossSystem:
let
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
in
{
packages =
{
default = scopeHost.main.override {
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
# Just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
}
// builtins.listToAttrs (
builtins.concatLists (
builtins.map
(
crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = [ "hardened_malloc" ];
};
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
}
);
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [ ./nix ];
systems = [
# good support
"x86_64-linux"
# support untested but theoretically there
"aarch64-linux"
];
};
}

108
nix/checks/default.nix Normal file
View File

@@ -0,0 +1,108 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
commonAttrs = (uwulib.build.commonAttrs { }) // {
buildInputs = [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
nativeBuildInputs = [
pkgs.pkg-config
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
env = {
LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
LD_LIBRARY_PATH = lib.makeLibraryPath [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
}
// uwulib.environment.buildPackageEnv
// {
ROCKSDB_INCLUDE_DIR = "${rocksdbAllFeatures}/include";
ROCKSDB_LIB_DIR = "${rocksdbAllFeatures}/lib";
};
};
cargoArtifacts = self'.packages.continuwuity-all-features-deps;
in
{
# taken from
#
# https://crane.dev/examples/quick-start.html
checks = {
continuwuity-all-features-build = self'.packages.continuwuity-all-features-bin;
continuwuity-all-features-clippy = uwulib.build.craneLibForChecks.cargoClippy (
commonAttrs
// {
inherit cargoArtifacts;
cargoClippyExtraArgs = "-- --deny warnings";
}
);
continuwuity-all-features-docs = uwulib.build.craneLibForChecks.cargoDoc (
commonAttrs
// {
inherit cargoArtifacts;
# This can be commented out or tweaked as necessary, e.g. set to
# `--deny rustdoc::broken-intra-doc-links` to only enforce that lint
env.RUSTDOCFLAGS = "--deny warnings";
}
);
# Check formatting
continuwuity-all-features-fmt = uwulib.build.craneLibForChecks.cargoFmt {
src = uwulib.build.src;
};
continuwuity-all-features-toml-fmt = uwulib.build.craneLibForChecks.taploFmt {
src = pkgs.lib.sources.sourceFilesBySuffices uwulib.build.src [ ".toml" ];
# taplo arguments can be further customized below as needed
taploExtraArgs = "--config ${inputs.self}/taplo.toml";
};
# Audit dependencies
continuwuity-all-features-audit = uwulib.build.craneLibForChecks.cargoAudit {
inherit (inputs) advisory-db;
src = uwulib.build.src;
};
# Audit licenses
continuwuity-all-features-deny = uwulib.build.craneLibForChecks.cargoDeny {
src = uwulib.build.src;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `continuwuity-all-features` if you do not want
# the tests to run twice
continuwuity-all-features-nextest = uwulib.build.craneLibForChecks.cargoNextest (
commonAttrs
// {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
cargoNextestPartitionsExtraArgs = "--no-tests=pass";
}
);
};
};
}

11
nix/default.nix Normal file
View File

@@ -0,0 +1,11 @@
{
imports = [
./checks
./packages
./shells
./tests
./hydra.nix
./fmt.nix
];
}

37
nix/fmt.nix Normal file
View File

@@ -0,0 +1,37 @@
{ inputs, ... }:
{
# load the flake module from upstream
imports = [ inputs.treefmt-nix.flakeModule ];
perSystem =
{ self', lib, ... }:
{
treefmt = {
# repo root as project root
projectRoot = inputs.self;
# the formatters
programs = {
nixfmt.enable = true;
typos = {
enable = true;
configFile = "${inputs.self}/.typos.toml";
};
taplo = {
enable = true;
settings = lib.importTOML "${inputs.self}/taplo.toml";
};
};
settings.formatter.rustfmt = {
command = "${lib.getExe' self'.packages.dev-toolchain "rustfmt"}";
includes = [ "**/*.rs" ];
options = [
"--unstable-features"
"--edition=2024"
"--config-path=${inputs.self}/rustfmt.toml"
];
};
};
};
}

9
nix/hydra.nix Normal file
View File

@@ -0,0 +1,9 @@
{ inputs, ... }:
let
lib = inputs.nixpkgs.lib;
in
{
flake.hydraJobs.packages = builtins.mapAttrs (
_name: lib.hydraJob
) inputs.self.packages.x86_64-linux;
}

View File

@@ -0,0 +1,60 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
in
{
packages =
lib.pipe
[
# this is the default variant
{
variantName = "default";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb;
features = { };
}
# this is the variant with all features enabled (liburing + jemalloc)
{
variantName = "all-features";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
features = {
enabledFeatures = "all";
disabledFeatures = uwulib.features.defaultDisabledFeatures ++ [ "bindgen-static" ];
};
}
]
[
(builtins.map (cfg: rec {
deps = {
name = "continuwuity-${cfg.variantName}-deps";
value = uwulib.build.buildDeps {
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
bin = {
name = "continuwuity-${cfg.variantName}-bin";
value = uwulib.build.buildPackage {
deps = self'.packages.${deps.name};
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
}))
(builtins.concatMap builtins.attrValues)
builtins.listToAttrs
];
};
}

14
nix/packages/default.nix Normal file
View File

@@ -0,0 +1,14 @@
{
imports = [
./continuwuity
./rocksdb
./rust.nix
./uwulib
];
perSystem =
{ self', ... }:
{
packages.default = self'.packages.continuwuity-default-bin;
};
}

View File

@@ -0,0 +1,12 @@
{
perSystem =
{
pkgs,
...
}:
{
packages = {
rocksdb = pkgs.callPackage ./package.nix { };
};
};
}

View File

@@ -0,0 +1,88 @@
{
lib,
stdenv,
rocksdb,
liburing,
rust-jemalloc-sys-unprefixed,
enableJemalloc ? false,
enableLiburing ? false,
fetchFromGitea,
...
}:
let
notDarwin = !stdenv.hostPlatform.isDarwin;
in
(rocksdb.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit liburing;
jemalloc = rust-jemalloc-sys-unprefixed;
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = enableJemalloc && notDarwin;
# for some reason enableLiburing in nixpkgs rocksdb defaults to true,
# which breaks Darwin entirely
enableLiburing = enableLiburing && notDarwin;
}).overrideAttrs
(old: {
src = fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.5.fb";
sha256 = "sha256-X4ApGLkHF9ceBtBg77dimEpu720I79ffLoyPa8JMHaU=";
};
version = "10.5.fb";
cmakeFlags =
lib.subtractLists (builtins.map (flag: lib.cmakeBool flag true) [
# No real reason to have snappy or zlib, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"FORCE_SSE42"
]) old.cmakeFlags
++ (builtins.map (flag: lib.cmakeBool flag false) [
# No real reason to have snappy, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need trace tools
"WITH_TRACE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
]);
enableLiburing = enableLiburing && notDarwin;
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting `patches` so we don't have to revert it and make this nix exclusive
patches = [ ];
})

32
nix/packages/rust.nix Normal file
View File

@@ -0,0 +1,32 @@
{ inputs, ... }:
{
perSystem =
{
system,
lib,
...
}:
{
packages =
let
fnx = inputs.fenix.packages.${system};
stable = fnx.fromToolchainFile {
file = inputs.self + "/rust-toolchain.toml";
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
};
in
{
# used for building nix stuff (doesn't include rustfmt overhead)
build-toolchain = stable;
# used for dev shells
dev-toolchain = fnx.combine [
stable
# use the nightly rustfmt because we use nightly features
fnx.complete.rustfmt
];
};
};
}

View File

@@ -0,0 +1,108 @@
args@{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
uwuenv = import ./environment.nix args;
selfpkgs = inputs.self.packages.${pkgs.stdenv.system};
in
rec {
# basic, very minimal instance of the crane library with a minimal rust toolchain
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.build-toolchain);
# the checks require more rust toolchain components, hence we have this separate instance of the crane library
craneLibForChecks = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.dev-toolchain);
# meta information (name, version, etc) of the rust crate based on the Cargo.toml
crateInfo = craneLib.crateNameFromCargoToml { cargoToml = "${inputs.self}/Cargo.toml"; };
src =
let
# see https://crane.dev/API.html#cranelibfiltercargosources
#
# we need to keep the `web` directory which would be filtered out by the regular source filtering function
#
# https://crane.dev/API.html#cranelibcleancargosource
isWebTemplate = path: _type: builtins.match ".*src/web.*" path != null;
isRust = craneLib.filterCargoSources;
isNix = path: _type: builtins.match ".+/nix.*" path != null;
webOrRustNotNix = p: t: !(isNix p t) && (isWebTemplate p t || isRust p t);
in
lib.cleanSourceWith {
src = inputs.self;
filter = webOrRustNotNix;
name = "source";
};
# common attrs that are shared between building continuwuity's deps and the package itself
commonAttrs =
{
profile ? "dev",
...
}:
{
inherit (crateInfo)
pname
version
;
inherit src;
# this prevents unnecessary rebuilds
strictDeps = true;
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
doCheck = true;
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
};
makeRocksDBEnv =
{ rocksdb }:
{
ROCKSDB_INCLUDE_DIR = "${rocksdb}/include";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
};
# function that builds the continuwuity dependencies derivation
buildDeps =
{
rocksdb,
features,
commonAttrsArgs,
}:
craneLib.buildDepsOnly (
(commonAttrs commonAttrsArgs)
// {
env = uwuenv.buildDepsOnlyEnv // (makeRocksDBEnv { inherit rocksdb; });
inherit (features) cargoExtraArgs;
}
);
# function that builds the continuwuity package
buildPackage =
{
deps,
rocksdb,
features,
commonAttrsArgs,
}:
let
rocksdbEnv = makeRocksDBEnv { inherit rocksdb; };
in
craneLib.buildPackage (
(commonAttrs commonAttrsArgs)
// {
cargoArtifacts = deps;
doCheck = true;
env = uwuenv.buildPackageEnv // rocksdbEnv;
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
meta.mainProgram = crateInfo.pname;
inherit (features) cargoExtraArgs;
}
);
}

View File

@@ -0,0 +1,10 @@
{ inputs, ... }:
{
flake.uwulib = {
init = pkgs: {
features = import ./features.nix { inherit pkgs inputs; };
environment = import ./environment.nix { inherit pkgs inputs; };
build = import ./build.nix { inherit pkgs inputs; };
};
};
}

View File

@@ -0,0 +1,18 @@
args@{ pkgs, inputs, ... }:
let
uwubuild = import ./build.nix args;
in
rec {
buildDepsOnlyEnv = {
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = "release";
}
// uwubuild.craneLib.mkCrossToolchainEnv (p: pkgs.clangStdenv);
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
}
// buildDepsOnlyEnv;
}

View File

@@ -0,0 +1,77 @@
{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
in
rec {
defaultDisabledFeatures = [
# don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
# we don't want to enable this feature set by default but be more specific about it
"full"
];
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
calcFeatures =
{
tomlPath ? "${inputs.self}/src/main",
# either a list of feature names or a string "all" which enables all non-default features
enabledFeatures ? [ ],
disabledFeatures ? defaultDisabledFeatures,
default_features ? true,
disable_release_max_log_level ? false,
}:
let
# simple helper to get the contents of a Cargo.toml file in a nix format
getToml = path: lib.importTOML "${path}/Cargo.toml";
# get all the features except for the default features
allFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features)
lib.attrNames
(lib.remove "default")
];
# get just the default enabled features
allDefaultFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features.default)
];
# depending on the value of enabledFeatures choose just a set or all non-default features
#
# - [ list of features ] -> choose exactly the features listed
# - "all" -> choose all non-default features
additionalFeatures = if enabledFeatures == "all" then allFeatures else enabledFeatures;
# unification with default features (if enabled)
features = lib.unique (additionalFeatures ++ lib.optionals default_features allDefaultFeatures);
# prepare the features that are subtracted from the set
disabledFeatures' =
disabledFeatures ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
# construct the final feature set
finalFeatures = lib.subtractLists disabledFeatures' features;
in
{
# final feature set, useful for querying it
features = finalFeatures;
# crane flag with the relevant features
cargoExtraArgs = builtins.concatStringsSep " " [
"--no-default-features"
"--locked"
(lib.optionalString (finalFeatures != [ ]) "--features")
(builtins.concatStringsSep "," finalFeatures)
];
};
}

View File

@@ -1,83 +0,0 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic
{
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
//
(
let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
)

View File

@@ -1,224 +0,0 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature: builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does, but
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its depedents.
jq
];
};
in
craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})

29
nix/shells/default.nix Normal file
View File

@@ -0,0 +1,29 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
in
{
# basic nix shell containing all things necessary to build continuwuity in all flavors manually (on x86_64-linux)
devShells.default = uwulib.build.craneLib.devShell {
packages = [
pkgs.pkg-config
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
env.LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
};
};
}

124
nix/tests/default.nix Normal file
View File

@@ -0,0 +1,124 @@
{
perSystem =
{
self',
lib,
pkgs,
...
}:
{
# run some nixos tests as checks
checks = lib.pipe self'.packages [
# we take all packages (names)
builtins.attrNames
# we keep only the packages that end with `-bin` (these are the binaries we want to test)
(builtins.filter (lib.hasSuffix "-bin"))
# for each of these binaries we build the basic nixos test
#
# this test was initially yoinked from
#
# https://github.com/NixOS/nixpkgs/blob/960ce26339661b1b69c6f12b9063ca51b688615f/nixos/tests/matrix/continuwuity.nix
(builtins.map (name: {
name = "test-${name}";
value = pkgs.testers.runNixOSTest {
inherit name;
nodes = {
continuwuity = {
services.matrix-continuwuity = {
enable = true;
package = self'.packages.${name};
settings.global = {
server_name = name;
address = [ "0.0.0.0" ];
allow_registration = true;
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true;
};
extraEnvironment.RUST_BACKTRACE = "yes";
};
networking.firewall.allowedTCPPorts = [ 6167 ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
import asyncio
import nio
async def main() -> None:
# Connect to continuwuity
client = nio.AsyncClient("http://continuwuity:6167", "alice")
# Register as user alice
response = await client.register("alice", "my-secret-password")
# Log in as user alice
response = await client.login("my-secret-password")
# Create a new room
response = await client.room_create(federate=False)
print("Matrix room create response:", response)
assert isinstance(response, nio.RoomCreateResponse)
room_id = response.room_id
# Join the room
response = await client.join(room_id)
print("Matrix join response:", response)
assert isinstance(response, nio.JoinResponse)
# Send a message to the room
response = await client.room_send(
room_id=room_id,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": "Hello continuwuity!"
}
)
print("Matrix room send response:", response)
assert isinstance(response, nio.RoomSendResponse)
# Sync responses
response = await client.sync(timeout=30000)
print("Matrix sync response:", response)
assert isinstance(response, nio.SyncResponse)
# Check the message was received by continuwuity
last_message = response.rooms.join[room_id].timeline.events[-1].body
assert last_message == "Hello continuwuity!"
# Leave the room
response = await client.room_leave(room_id)
print("Matrix room leave response:", response)
assert isinstance(response, nio.RoomLeaveResponse)
# Close the client
await client.close()
if __name__ == "__main__":
asyncio.run(main())
'')
];
};
};
testScript = ''
start_all()
with subtest("start continuwuity"):
continuwuity.wait_for_unit("continuwuity.service")
continuwuity.wait_for_open_port(6167)
with subtest("ensure messages can be exchanged"):
client.succeed("do_test >&2")
'';
};
}))
builtins.listToAttrs
];
};
}

View File

@@ -1,25 +1,25 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Type=notify-reload
ReloadSignal=SIGUSR1
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
ExecStart=/usr/sbin/conduwuit
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
AmbientCapabilities=
CapabilityBoundingSet=
@@ -52,16 +52,18 @@ SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit
# ConfigurationDirectory isn't specified here because it's created by
# the distro's package manager.
StateDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Restart=on-failure
RestartSec=5
TimeoutStopSec=2m
TimeoutStartSec=2m
TimeoutStopSec=4m
TimeoutStartSec=4m
StartLimitInterval=1m
StartLimitBurst=5

View File

@@ -1,12 +1,28 @@
# Continuwuity for Debian
This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.
This document provides information about downloading and deploying the Debian package. You can also use this guide for other deb-based distributions such as Ubuntu.
### Installation
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
To add the Continuwuation apt repository:
```bash
# Replace with `"dev"` for bleeding-edge builds at your own risk
export COMPONENT="stable"
# Import the Continuwuation signing key
sudo curl https://forgejo.ellis.link/api/packages/continuwuation/debian/repository.key -o /etc/apt/keyrings/forgejo-continuwuation.asc
# Add a new apt source list pointing to the repository
echo "deb [signed-by=/etc/apt/keyrings/forgejo-continuwuation.asc] https://forgejo.ellis.link/api/packages/continuwuation/debian $(lsb_release -sc) $COMPONENT" | sudo tee /etc/apt/sources.list.d/continuwuation.list
# Update remote package lists
sudo apt update
```
No `apt` repository is currently available. This feature is in development.
To install continuwuity:
```bash
sudo apt install continuwuity
```
The `continuwuity` package conflicts with the old `conduwuit` package and will remove it automatically when installed.
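To confirm the installed version and which repository supplied it:
```bash
apt policy continuwuity
```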
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
### Configuration
@@ -16,7 +32,7 @@ ### Configuration
### Running
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/bin/conduwuit`.
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.

20
pkg/debian/postinst Normal file
View File

@@ -0,0 +1,20 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#

View File

@@ -20,24 +20,18 @@ case $1 in
if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
if test -L "$CONDUWUIT_CONFIG_PATH"; then
echo "Deleting conduwuit configuration files"
echo "Deleting continuwuity configuration files"
rm -v -r "$CONDUWUIT_CONFIG_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
if test -L "$CONDUWUIT_DATABASE_PATH"; then
echo "Deleting conduwuit database directory"
echo "Deleting continuwuity database directory"
rm -r "$CONDUWUIT_DATABASE_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
echo "Removing matrix-conduit symlink"
rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
fi
fi
;;
esac

View File

@@ -0,0 +1,79 @@
# This should be run using rpkg: https://docs.pagure.org/rpkg
# it requires Internet access and is not suitable for Fedora main repos
Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust
License: Apache-2.0 AND MIT
URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}
BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel
Requires: liburing
Requires: glibc
Requires: libstdc++
%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}
%description %{_description}
%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml
%build
%cargo_build
# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config(noreplace) %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary
%post
%systemd_post conduwuit.service
%preun
%systemd_preun conduwuit.service
%postun
%systemd_postun_with_restart conduwuit.service
%changelog
{{{ git_repo_changelog }}}

View File

@@ -1,26 +1,88 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true,
"schedule": [
"at any time"
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended", "replacements:all"],
"osvVulnerabilityAlerts": true,
"lockFileMaintenance": {
"enabled": true,
"schedule": ["at any time"]
},
"platformAutomerge": true,
"nix": {
"enabled": true
},
"pre-commit": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"rustyline-async",
"event-listener",
"async-channel",
"core_affinity",
"hyper-util"
],
"github-actions": {
"enabled": true,
"managerFilePatterns": [
"/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
"/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
]
},
"packageRules": [
{
"description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"],
"groupName": "rust-patch-updates"
},
{
"description": "Limit concurrent Cargo PRs",
"matchManagers": ["cargo"],
"prConcurrentLimit": 5
},
{
"description": "Group Rust toolchain updates into a single PR",
"matchManagers": ["custom.regex"],
"matchPackageNames": ["rust", "rustc", "cargo"],
"groupName": "rust-toolchain"
},
{
"description": "Batch minor and patch GitHub Actions updates",
"matchManagers": ["github-actions"],
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Pin forgejo artifact actions to prevent breaking changes",
"matchManagers": ["github-actions"],
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
"enabled": false
},
{
"description": "Auto-merge renovatebot docker image updates",
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/renovatebot/renovate"],
"automerge": true,
"automergeStrategy": "fast-forward",
"extends": ["schedule:earlyMondays"]
}
],
"customManagers": [
{
"customType": "regex",
"description": "Update _VERSION variables in Dockerfiles",
"managerFilePatterns": [
"/(^|/)([Dd]ocker|[Cc]ontainer)file[^/]*$/",
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s+(?:(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_CHECKSUM[ =][\"']?(?<currentDigest>.+?)[\"']?\\s)?"
]
}
]
},
"nix": {
"enabled": true
},
"labels": [
"dependencies",
"github_actions"
],
"ignoreDeps": [
"tikv-jemllocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry-rust",
"tracing-opentelemetry"
]
}
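For illustration, the `customManagers` regex above is shaped to match annotated version pins in Dockerfiles/Containerfiles of roughly this form (package and version here are example values, not pins from this repo):

```dockerfile
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ARG CARGO_BINSTALL_VERSION=1.15.10
```

The `# renovate:` comment supplies the datasource and package name; the `ARG`/`ENV` `*_VERSION` assignment on the next line carries the `currentValue` that Renovate rewrites, and an optional `*_CHECKSUM` line is captured as `currentDigest`.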


@@ -9,13 +9,16 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.87.0"
profile = "minimal"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
# you have to install rustfmt nightly yourself (if you're not on NixOS)
#
# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
# "rustfmt"
]


@@ -85,10 +85,11 @@ futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
serde-saphyr.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true


@@ -16,7 +16,7 @@ pub(super) async fn register(&self) -> Result {
let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_yaml::from_str(&appservice_config_body);
let parsed_config = serde_saphyr::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
@@ -57,7 +57,7 @@ pub(super) async fn show_appservice_config(&self, appservice_identifier: String)
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_yaml::to_string(&config)?;
let config_str = serde_saphyr::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
}
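A minimal sketch of the replacement API, assuming serde-saphyr's `from_str`/`to_string` behave like the serde_yaml entry points they replace at these call sites (the `Registration` type here is a hypothetical stand-in, not the real ruma type):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Registration {
    id: String,
    url: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let input = "id: my_bridge\nurl: http://localhost:8009";
    // Parse YAML into the struct, then emit it back out as YAML.
    let parsed: Registration = serde_saphyr::from_str(input)?;
    let emitted = serde_saphyr::to_string(&parsed)?;
    println!("{emitted}");
    Ok(())
}
```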


@@ -281,15 +281,8 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)]
};
info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.await?;
let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server and handled as backfilled";
let msg = "Got PDU from specified server:";
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
},
}
@@ -639,6 +632,7 @@ pub(super) async fn force_set_room_state_from_server(
.add_pdu_outlier(&event_id, &value);
}
info!("Resolving new room state");
let new_room_state = self
.services
.rooms
@@ -646,7 +640,7 @@ pub(super) async fn force_set_room_state_from_server(
.resolve_state(&room_id, &room_version, state)
.await?;
info!("Forcing new room state");
info!("Compressing new room state");
let HashSetCompressStateEvent {
shortstatehash: short_state_hash,
added,
@@ -660,6 +654,7 @@ pub(super) async fn force_set_room_state_from_server(
let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
info!("Forcing new room state");
self.services
.rooms
.state


@@ -2,7 +2,8 @@
use conduwuit::{
Err, Result, debug, debug_info, debug_warn, error, info, trace,
utils::time::parse_timepoint_ago, warn,
utils::time::{TimeDirection, parse_timepoint_ago},
warn,
};
use conduwuit_service::media::Dim;
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
@@ -235,14 +236,19 @@ pub(super) async fn delete_past_remote_media(
}
assert!(!(before && after), "--before and --after should not be specified together");
let duration = parse_timepoint_ago(&duration)?;
let direction = if after {
TimeDirection::After
} else {
TimeDirection::Before
};
let time_boundary = parse_timepoint_ago(&duration)?;
let deleted_count = self
.services
.media
.delete_all_remote_media_at_after_time(
duration,
before,
after,
.delete_all_media_within_timeframe(
time_boundary,
direction,
yes_i_want_to_delete_local_media,
)
.await?;
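The boundary semantics assumed by the new `delete_all_media_within_timeframe` call can be sketched as follows (`TimeDirection` mirrors the import above; the function and its behaviour are an illustrative guess, not the service's real code):

```rust
use std::time::{Duration, SystemTime};

// Mirrors conduwuit's utils::time::TimeDirection as imported above.
enum TimeDirection {
    Before,
    After,
}

// Hypothetical check: does a file's creation (or last-modified) time fall
// inside the deletion window implied by `-b`/`-a <duration>`?
fn in_deletion_window(file_time: SystemTime, ago: Duration, direction: TimeDirection) -> bool {
    // "<duration> ago" becomes a concrete boundary timepoint.
    let boundary = SystemTime::now() - ago;
    match direction {
        // `--before 1y`: delete anything older than one year ago.
        TimeDirection::Before => file_time < boundary,
        // `--after 3d`: delete anything from three days ago up to now.
        TimeDirection::After => file_time > boundary,
    }
}
```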


@@ -27,12 +27,24 @@ pub enum MediaCommand {
/// filesystem. This will always ignore errors.
DeleteList,
/// - Deletes all remote (and optionally local) media created before or
/// after [duration] time using filesystem metadata first created at date,
/// or fallback to last modified date. This will always ignore errors by
/// default.
/// Deletes all remote (and optionally local) media created before/after
/// [duration] ago, using filesystem metadata first created at date, or
/// fallback to last modified date. This will always ignore errors by
/// default.
///
/// * Examples:
/// * Delete all remote media older than a year:
///
/// `!admin media delete-past-remote-media -b 1y`
///
/// * Delete all remote and local media from 3 days ago, up until now:
///
/// `!admin media delete-past-remote-media -a 3d
/// --yes-i-want-to-delete-local-media`
#[command(verbatim_doc_comment)]
DeletePastRemoteMedia {
/// - The relative time (e.g. 30s, 5m, 7d) within which to search
/// - The relative time (e.g. 30s, 5m, 7d) from now within which to
/// search
duration: String,
/// - Only delete media created before [duration] ago


@@ -29,6 +29,8 @@
pub(crate) const PAGE_SIZE: usize = 100;
use ctor::{ctor, dtor};
conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}


@@ -57,5 +57,5 @@ pub(super) async fn pdus(
.try_collect()
.await?;
self.write_str(&format!("{result:#?}")).await
self.write_str(&format!("```\n{result:#?}\n```")).await
}


@@ -1,8 +1,8 @@
use std::{collections::BTreeMap, fmt::Write as _};
use api::client::{
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
update_displayname,
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
update_avatar_url, update_displayname,
};
use conduwuit::{
Err, Result, debug, debug_warn, error, info, is_equal_to,
@@ -68,7 +68,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// Create user
self.services
.users
.create(&user_id, Some(password.as_str()))?;
.create(&user_id, Some(password.as_str()), None)
.await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -178,7 +179,11 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
.await
.is_ok_and(is_equal_to!(1))
{
self.services.admin.make_user_admin(&user_id).await?;
self.services
.admin
.make_user_admin(&user_id)
.boxed()
.await?;
warn!("Granting {user_id} admin privileges as the first user");
}
} else {
@@ -216,7 +221,9 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
.collect()
.await;
full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
full_user_deactivate(self.services, &user_id, &all_joined_rooms)
.boxed()
.await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms).await;
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await;
leave_all_rooms(self.services, &user_id).await;
@@ -284,6 +291,7 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
.services
.users
.set_password(&user_id, Some(new_password.as_str()))
.await
{
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) => {
@@ -374,7 +382,9 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.collect()
.await;
full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
full_user_deactivate(self.services, &user_id, &all_joined_rooms)
.boxed()
.await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms).await;
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms)
.await;
@@ -754,7 +764,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
&user_id,
&room_id,
Some(&room_id),
&state_lock,
)
.await?;
@@ -774,7 +784,11 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
"Parsed user_id must be a local user"
);
self.services.admin.make_user_admin(&user_id).await?;
self.services
.admin
.make_user_admin(&user_id)
.boxed()
.await?;
self.write_str(&format!("{user_id} has been granted admin privileges.",))
.await
@@ -899,7 +913,13 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
);
let redaction_event_id = {
let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await;
let state_lock = self
.services
.rooms
.state
.mutex
.lock(&event.room_id_or_hash())
.await;
self.services
.rooms
@@ -913,7 +933,7 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
})
},
event.sender(),
event.room_id(),
Some(&event.room_id_or_hash()),
&state_lock,
)
.await?
@@ -924,3 +944,29 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
))
.await
}
#[admin_command]
pub(super) async fn force_leave_remote_room(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let (room_id, _) = self
.services
.rooms
.alias
.resolve_with_servers(&room_id, None)
.await?;
assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
remote_leave_room(self.services, &user_id, &room_id, None)
.boxed()
.await?;
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
.await
}


@@ -103,6 +103,12 @@ pub enum UserCommand {
room_id: OwnedRoomOrAliasId,
},
/// - Manually leave a remote room for a local user.
ForceLeaveRemoteRoom {
user_id: String,
room_id: OwnedRoomOrAliasId,
},
/// - Forces the specified user to drop their power levels to the room
/// default, if their permissions allow and the auth check permits
ForceDemote {


@@ -49,6 +49,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
ldap = [
"conduwuit-service/ldap"
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
@@ -90,6 +93,7 @@ serde.workspace = true
sha1.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true


@@ -373,7 +373,7 @@ pub(crate) async fn register_route(
let password = if is_guest { None } else { body.password.as_deref() };
// Create user
services.users.create(&user_id, password)?;
services.users.create(&user_id, password, None).await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -405,41 +405,36 @@ pub(crate) async fn register_route(
)
.await?;
if (!is_guest && body.inhibit_login)
// Generate new device id if the user didn't specify one
let no_device = body.inhibit_login
|| body
.appservice_info
.as_ref()
.is_some_and(|appservice| appservice.registration.device_management)
{
return Ok(register::v3::Response {
access_token: None,
user_id,
device_id: None,
refresh_token: None,
expires_in: None,
});
}
.is_some_and(|aps| aps.registration.device_management);
let (token, device) = if !no_device {
// Don't create a device for inhibited logins
let device_id = if is_guest { None } else { body.device_id.clone() }
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
// Generate new device id if the user didn't specify one
let device_id = if is_guest { None } else { body.device_id.clone() }
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
// Generate new token for the device
let new_token = utils::random_string(TOKEN_LENGTH);
// Generate new token for the device
let token = utils::random_string(TOKEN_LENGTH);
// Create device for this account
services
.users
.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;
debug_info!(%user_id, %device_id, "User account was created");
// Create device for this account
services
.users
.create_device(
&user_id,
&device_id,
&new_token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;
debug_info!(%user_id, %device_id, "User account was created");
(Some(new_token), Some(device_id))
} else {
(None, None)
};
let device_display_name = body.initial_device_display_name.as_deref().unwrap_or("");
@@ -505,7 +500,7 @@ pub(crate) async fn register_route(
.await
.is_ok_and(is_equal_to!(1))
{
services.admin.make_user_admin(&user_id).await?;
services.admin.make_user_admin(&user_id).boxed().await?;
warn!("Granting {user_id} admin privileges as the first user");
} else if services.config.suspend_on_register {
// This is not an admin, suspend them.
@@ -583,9 +578,9 @@ pub(crate) async fn register_route(
}
Ok(register::v3::Response {
access_token: Some(token),
access_token: token,
user_id,
device_id: Some(device_id),
device_id: device,
refresh_token: None,
expires_in: None,
})
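Reduced to its core, the refactor above computes the optional token/device pair once and threads it straight into the response (an illustrative sketch; names and literals are placeholders):

```rust
// Mint a (token, device_id) pair only when a device should be created.
fn token_and_device(no_device: bool, requested: Option<String>) -> (Option<String>, Option<String>) {
    if no_device {
        // Inhibited login or appservice-managed devices: no device, no token.
        (None, None)
    } else {
        let device_id = requested.unwrap_or_else(|| "GENERATEDDEVICEID".to_owned());
        let token = "GENERATEDTOKEN".to_owned(); // stand-in for utils::random_string
        (Some(token), Some(device_id))
    }
}
```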
@@ -659,7 +654,8 @@ pub(crate) async fn change_password_route(
services
.users
.set_password(sender_user, Some(&body.new_password))?;
.set_password(sender_user, Some(&body.new_password))
.await?;
if body.logout_devices {
// Logout all devices except the current one
@@ -928,7 +924,7 @@ pub async fn full_user_deactivate(
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await


@@ -0,0 +1,3 @@
mod suspend;
pub(crate) use self::suspend::*;


@@ -0,0 +1,89 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::future::{join, join3};
use ruma::api::client::admin::{get_suspended, set_suspended};
use crate::Ruma;
/// # `GET /_matrix/client/v1/admin/suspend/{userId}`
///
/// Check the suspension status of a target user
pub(crate) async fn get_suspended_status(
State(services): State<crate::State>,
body: Ruma<get_suspended::v1::Request>,
) -> Result<get_suspended::v1::Response> {
let sender_user = body.sender_user();
let (admin, active) =
join(services.users.is_admin(sender_user), services.users.is_active(&body.user_id)).await;
if !admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only check the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
Ok(get_suspended::v1::Response::new(
services.users.is_suspended(&body.user_id).await?,
))
}
/// # `PUT /_matrix/client/v1/admin/suspend/{userId}`
///
/// Set the suspension status of a target user
pub(crate) async fn put_suspended_status(
State(services): State<crate::State>,
body: Ruma<set_suspended::v1::Request>,
) -> Result<set_suspended::v1::Response> {
let sender_user = body.sender_user();
let (sender_admin, active, target_admin) = join3(
services.users.is_admin(sender_user),
services.users.is_active(&body.user_id),
services.users.is_admin(&body.user_id),
)
.await;
if !sender_admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only set the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
if body.user_id == *sender_user {
return Err!(Request(Forbidden("You cannot suspend yourself")));
}
if target_admin {
return Err!(Request(Forbidden("You cannot suspend another server administrator")));
}
if services.users.is_suspended(&body.user_id).await? == body.suspended {
// No change
return Ok(set_suspended::v1::Response::new(body.suspended));
}
let action = if body.suspended {
services
.users
.suspend_account(&body.user_id, sender_user)
.await;
"suspended"
} else {
services.users.unsuspend_account(&body.user_id).await;
"unsuspended"
};
if services.config.admin_room_notices {
// Notify the admin room that an account has been un/suspended
services
.admin
.send_text(&format!("{} has been {} by {}.", body.user_id, action, sender_user))
.await;
}
Ok(set_suspended::v1::Response::new(body.suspended))
}
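As a usage sketch, with paths from the doc comments above (the wire field name `suspended` is assumed from the handler's `body.suspended`/`Response::new(body.suspended)` round-trip):

```
PUT /_matrix/client/v1/admin/suspend/@spammer:example.org
{"suspended": true}

HTTP/1.1 200 OK
{"suspended": true}
```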


@@ -19,7 +19,7 @@
/// of this server.
pub(crate) async fn get_capabilities_route(
State(services): State<crate::State>,
_body: Ruma<get_capabilities::v3::Request>,
body: Ruma<get_capabilities::v3::Request>,
) -> Result<get_capabilities::v3::Response> {
let available: BTreeMap<RoomVersionId, RoomVersionStability> =
Server::available_room_versions().collect();
@@ -45,5 +45,14 @@ pub(crate) async fn get_capabilities_route(
json!({"enabled": services.config.forget_forced_upon_leave}),
)?;
if services
.users
.is_admin(body.sender_user.as_ref().unwrap())
.await
{
// Advertise suspension API
capabilities.set("uk.timedout.msc4323", json!({"suspend":true, "lock": false}))?;
}
Ok(get_capabilities::v3::Response { capabilities })
}


@@ -69,7 +69,7 @@ pub(crate) async fn get_context_route(
let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;
if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id {
if base_pdu.room_id_or_hash() != *room_id || base_pdu.event_id != *event_id {
return Err!(Request(NotFound("Base event not found.")));
}
@@ -130,7 +130,7 @@ pub(crate) async fn get_context_route(
let state_at = events_after
.last()
.map(ref_at!(1))
.map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref());
.map_or_else(|| body.event_id.as_ref(), |pdu| pdu.event_id.as_ref());
let state_ids = services
.rooms


@@ -64,10 +64,14 @@ pub(crate) async fn create_content_route(
media_id: &utils::random_string(MXC_LENGTH),
};
services
if let Err(e) = services
.media
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await?;
.await
{
err!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}
let blurhash = body.generate_blurhash.then(|| {
services


@@ -49,7 +49,7 @@ pub(crate) async fn ban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;


@@ -4,11 +4,14 @@
Err, Result, debug_error, err, info,
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
};
use futures::{FutureExt, join};
use futures::FutureExt;
use ruma::{
OwnedServerName, RoomId, UserId,
api::{client::membership::invite_user, federation::membership::create_invite},
events::room::member::{MembershipState, RoomMemberEventContent},
events::{
invite_permission_config::FilterLevel,
room::member::{MembershipState, RoomMemberEventContent},
},
};
use service::Services;
@@ -47,22 +50,21 @@ pub(crate) async fn invite_user_route(
.await?;
match &body.recipient {
| invite_user::v3::InvitationRecipient::UserId { user_id } => {
let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id);
let recipient_ignored_by_sender =
services.users.user_is_ignored(user_id, sender_user);
| invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
let sender_filter_level = services
.users
.invite_filter_level(recipient_user, sender_user)
.await;
let (sender_ignored_recipient, recipient_ignored_by_sender) =
join!(sender_ignored_recipient, recipient_ignored_by_sender);
if sender_ignored_recipient {
if !matches!(sender_filter_level, FilterLevel::Allow) {
// drop invites if the sender has the recipient filtered
return Ok(invite_user::v3::Response {});
}
if let Ok(target_user_membership) = services
.rooms
.state_accessor
.get_member(&body.room_id, user_id)
.get_member(&body.room_id, recipient_user)
.await
{
if target_user_membership.membership == MembershipState::Ban {
@@ -70,16 +72,27 @@ pub(crate) async fn invite_user_route(
}
}
if recipient_ignored_by_sender {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
return Ok(invite_user::v3::Response {});
// check for blocked invites if the recipient is a local user.
if services.globals.user_is_local(recipient_user) {
let recipient_filter_level = services
.users
.invite_filter_level(sender_user, recipient_user)
.await;
// ignored invites aren't handled here
// since the recipient's membership should still be changed to `invite`.
// they're filtered out in the individual /sync handlers.
if matches!(recipient_filter_level, FilterLevel::Block) {
return Err!(Request(InviteBlocked(
"{recipient_user} has blocked invites from you."
)));
}
}
invite_helper(
&services,
sender_user,
user_id,
recipient_user,
&body.room_id,
body.reason.clone(),
false,
@@ -98,7 +111,7 @@ pub(crate) async fn invite_user_route(
pub(crate) async fn invite_helper(
services: &Services,
sender_user: &UserId,
user_id: &UserId,
recipient_user: &UserId,
room_id: &RoomId,
reason: Option<String>,
is_direct: bool,
@@ -111,12 +124,12 @@ pub(crate) async fn invite_helper(
return Err!(Request(Forbidden("Invites are not allowed on this server.")));
}
if !services.globals.user_is_local(user_id) {
if !services.globals.user_is_local(recipient_user) {
let (pdu, pdu_json, invite_room_state) = {
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
avatar_url: services.users.avatar_url(user_id).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -126,14 +139,14 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(user_id.to_string(), &content),
PduBuilder::state(recipient_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;
let invite_room_state = services.rooms.state.summary_stripped(&pdu).await;
let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
drop(state_lock);
@@ -144,7 +157,7 @@ pub(crate) async fn invite_helper(
let response = services
.sending
.send_federation_request(user_id.server_name(), create_invite::v2::Request {
.send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
room_id: room_id.to_owned(),
event_id: (*pdu.event_id).to_owned(),
room_version: room_version_id.clone(),
@@ -173,7 +186,7 @@ pub(crate) async fn invite_helper(
return Err!(Request(BadJson(warn!(
%pdu.event_id, %event_id,
"Server {} sent event with wrong event ID",
user_id.server_name()
recipient_user.server_name()
))));
}
@@ -213,9 +226,9 @@ pub(crate) async fn invite_helper(
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
displayname: services.users.displayname(user_id).await.ok(),
avatar_url: services.users.avatar_url(user_id).await.ok(),
blurhash: services.users.blurhash(user_id).await.ok(),
displayname: services.users.displayname(recipient_user).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
blurhash: services.users.blurhash(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -225,9 +238,9 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(user_id.to_string(), &content),
PduBuilder::state(recipient_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;


@@ -18,7 +18,7 @@
},
warn,
};
use futures::{FutureExt, StreamExt};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId,
@@ -156,31 +156,34 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.await?;
let mut servers = body.via.clone();
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
if servers.is_empty() {
debug!("No via servers provided for join, injecting some.");
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
}
}
servers.sort_unstable();
@@ -310,11 +313,14 @@ pub async fn join_room_by_id_helper(
}
}
let local_join = server_in_room
|| servers.is_empty()
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
if !server_in_room && servers.is_empty() {
return Err!(Request(NotFound(
"No servers were provided to assist in joining the room remotely, and we are not \
already participating in the room."
)));
}
if local_join {
if server_in_room {
join_room_by_id_helper_local(
services,
sender_user,
@@ -553,6 +559,10 @@ async fn join_room_by_id_helper_remote(
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.inspect_err(|e| {
debug_warn!("Could not validate send_join response room_state event: {e:?}");
})
.inspect(|_| debug!("Completed validating send_join response room_state event"))
})
.ready_filter_map(Result::ok)
.fold(HashMap::new(), |mut state, (event_id, value)| async move {
@@ -563,7 +573,6 @@ async fn join_room_by_id_helper_remote(
return state;
},
};
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key {
let shortstatekey = services
@@ -574,7 +583,6 @@ async fn join_room_by_id_helper_remote(
state.insert(shortstatekey, pdu.event_id.clone());
}
state
})
.await;
@@ -595,6 +603,7 @@ async fn join_room_by_id_helper_remote(
})
.ready_filter_map(Result::ok)
.ready_for_each(|(event_id, value)| {
trace!(%event_id, "Adding PDU as an outlier from send_join auth_chain");
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
})
.await;
@@ -615,6 +624,9 @@ async fn join_room_by_id_helper_remote(
&parsed_join_pdu,
None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()),
&state_fetch(StateEventType::RoomCreate, "".into())
.await
.expect("create event is missing from send_join auth"),
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@@ -649,7 +661,7 @@ async fn join_room_by_id_helper_remote(
.force_state(room_id, statehash_before_join, added, removed, &state_lock)
.await?;
info!("Updating joined counts for new room");
debug!("Updating joined counts for new room");
services
.rooms
.state_cache
@@ -662,7 +674,7 @@ async fn join_room_by_id_helper_remote(
let statehash_after_join = services
.rooms
.state
.append_to_state(&parsed_join_pdu)
.append_to_state(&parsed_join_pdu, room_id)
.await?;
info!("Appending new room join event");
@@ -674,6 +686,7 @@ async fn join_room_by_id_helper_remote(
join_event,
once(parsed_join_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;
@@ -729,6 +742,7 @@ async fn join_room_by_id_helper_local(
.iter()
.stream()
.any(|restriction_room_id| {
trace!("Checking if {sender_user} is joined to {restriction_room_id}");
services
.rooms
.state_cache
@@ -741,6 +755,7 @@ async fn join_room_by_id_helper_local(
.state_cache
.local_users_in_room(room_id)
.filter(|user| {
trace!("Checking if {user} can invite {sender_user} to {room_id}");
services.rooms.state_accessor.user_can_invite(
room_id,
user,
@@ -753,6 +768,7 @@ async fn join_room_by_id_helper_local(
.await
.map(ToOwned::to_owned)
} else {
trace!("No restriction rooms are joined by {sender_user}");
None
}
};
@@ -773,7 +789,7 @@ async fn join_room_by_id_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await


@@ -54,7 +54,7 @@ pub(crate) async fn kick_user_route(
..event
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;


@@ -373,7 +373,7 @@ async fn knock_room_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await
@@ -502,6 +502,7 @@ async fn knock_room_helper_local(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;
@@ -672,7 +673,7 @@ async fn knock_room_helper_remote(
let statehash_after_knock = services
.rooms
.state
.append_to_state(&parsed_knock_pdu)
.append_to_state(&parsed_knock_pdu, room_id)
.await?;
info!("Updating membership locally to knock state with provided stripped state events");
@@ -701,6 +702,7 @@ async fn knock_room_helper_remote(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;


@@ -206,7 +206,7 @@ pub async fn leave_room(
..event
}),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await?;
@@ -215,7 +215,7 @@ pub async fn leave_room(
Ok(())
}
async fn remote_leave_room(
pub async fn remote_leave_room(
services: &Services,
user_id: &UserId,
room_id: &RoomId,


@@ -29,7 +29,7 @@
};
pub use self::{
join::join_room_by_id_helper,
leave::{leave_all_rooms, leave_room},
leave::{leave_all_rooms, leave_room, remote_leave_room},
};
use crate::{Ruma, client::full_user_deactivate};
@@ -69,11 +69,11 @@ pub(crate) async fn banned_room_check(
}
if let Some(room_id) = room_id {
if services.rooms.metadata.is_banned(room_id).await
|| services
.moderation
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid"))
{
let room_banned = services.rooms.metadata.is_banned(room_id).await;
let server_banned = room_id.server_name().is_some_and(|server_name| {
services.moderation.is_remote_server_forbidden(server_name)
});
if room_banned || server_banned {
warn!(
"User {user_id} who is not an admin attempted to send an invite for or \
attempted to join a banned room or banned room server name: {room_id}"
@@ -106,7 +106,6 @@ pub(crate) async fn banned_room_check(
.boxed()
.await?;
}
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
}
} else if let Some(server_name) = server_name {


@@ -47,7 +47,7 @@ pub(crate) async fn unban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;


@@ -8,7 +8,7 @@
ref_at,
utils::{
IterStream, ReadyExt,
result::{FlatOk, LogErr},
result::LogErr,
stream::{BroadbandExt, TryIgnore, WidebandExt},
},
};
@@ -30,6 +30,7 @@
events::{
AnyStateEvent, StateEventType,
TimelineEventType::{self, *},
invite_permission_config::FilterLevel,
},
serde::Raw,
};
@@ -91,7 +92,7 @@ pub(crate) async fn get_message_events_route(
| Direction::Backward => PduCount::max(),
});
let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
let to: Option<PduCount> = body.to.as_deref().map(str::parse).transpose()?;
let limit: usize = body
.limit
@@ -181,7 +182,7 @@ pub(crate) async fn get_message_events_route(
Ok(get_message_events::v3::Response {
start: from.to_string(),
end: next_token.as_ref().map(ToString::to_string),
end: next_token.as_ref().map(PduCount::to_string),
chunk,
state,
})
@@ -267,7 +268,7 @@ pub(crate) async fn ignored_filter(
pub(crate) async fn is_ignored_pdu<Pdu>(
services: &Services,
event: &Pdu,
user_id: &UserId,
recipient_user: &UserId,
) -> bool
where
Pdu: Event + Send + Sync,
@@ -278,20 +279,29 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
return true;
}
let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
let ignored_server = services
let sender_user = event.sender();
let type_ignored = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
let server_ignored = services
.moderation
.is_remote_server_ignored(event.sender().server_name());
.is_remote_server_ignored(sender_user.server_name());
let user_ignored = services
.users
.user_is_ignored(sender_user, recipient_user)
.await;
if ignored_type
&& (ignored_server
|| (!services.config.send_messages_from_ignored_users_to_client
&& services
.users
.user_is_ignored(event.sender(), user_id)
.await))
{
if !type_ignored {
// We cannot safely ignore this type
return false;
}
if server_ignored {
// the sender's server is ignored, so ignore this event
return true;
}
if user_ignored && !services.config.send_messages_from_ignored_users_to_client {
// the recipient of this PDU has the sender ignored, and we're not
// configured to send ignored messages to clients
return true;
}
@@ -309,7 +319,7 @@ pub(crate) async fn visibility_filter(
services
.rooms
.state_accessor
.user_can_see_event(user_id, pdu.room_id(), pdu.event_id())
.user_can_see_event(user_id, &pdu.room_id_or_hash(), pdu.event_id())
.await
.then_some(item)
}
@@ -320,7 +330,30 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
filter.matches(pdu).then_some(item)
}
#[cfg_attr(debug_assertions, conduwuit::ctor)]
#[inline]
pub(crate) async fn is_ignored_invite(
services: &Services,
recipient_user: &UserId,
room_id: &RoomId,
) -> bool {
let Ok(sender_user) = services
.rooms
.state_cache
.invite_sender(recipient_user, room_id)
.await
else {
// the invite may have been sent before the invite_sender table existed.
// assume it's not ignored
return false;
};
services
.users
.invite_filter_level(&sender_user, recipient_user)
.await == FilterLevel::Ignore
}
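Read together with the `/invite` handler earlier in this diff, the three filter levels behave roughly as follows (a summary sketch; the enum is a local stand-in for ruma's `invite_permission_config::FilterLevel`):

```rust
enum FilterLevel {
    Allow,
    Ignore,
    Block,
}

// Summary of the behaviour implemented across the handlers in this diff.
fn invite_outcome(level: FilterLevel) -> &'static str {
    match level {
        | FilterLevel::Allow => "invite proceeds normally",
        | FilterLevel::Block => "rejected at /invite time with an InviteBlocked error",
        | FilterLevel::Ignore => "membership still set to invite, but filtered out of /sync",
    }
}
```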
#[cfg_attr(debug_assertions, ctor::ctor)]
fn _is_sorted() {
debug_assert!(
IGNORED_MESSAGE_TYPES.is_sorted(),

Some files were not shown because too many files have changed in this diff.