Compare commits

..

84 Commits

Author SHA1 Message Date
Jade Ellis
f2e588b294 fix: Stop forcing the appservice token into the URL 2026-01-05 18:14:16 +00:00
Jade Ellis
7ec81c25f1 fix: Use correct token handlers for Ruma 2026-01-05 18:12:20 +00:00
Jade Ellis
61f61826bb fix: Apply timeouts in more places 2026-01-05 18:09:18 +00:00
timedout
27d6604d14 fix: Use a timeout instead of deadline 2026-01-03 17:08:47 +00:00
timedout
1c7bd2f6fa style: Remove unnecessary then() calls in chain 2026-01-03 16:22:49 +00:00
timedout
56d7099011 style: Include errors in key claim response too 2026-01-03 16:10:06 +00:00
timedout
bc426e1bfc fix: Apply client-requested timeout to federated key queries
Also parallelised federation calls in related functions
2026-01-03 16:05:05 +00:00
timedout
6c61b3ec5b fix: Build error two: electric boogaloo 2025-12-31 21:15:28 +00:00
timedout
9d9d1170b6 fix: Build error 2025-12-31 21:04:06 +00:00
Jade Ellis
7be20abcad style: Fix typo 2025-12-31 20:08:53 +00:00
Jade Ellis
078275964c chore: Update precommit hooks 2025-12-31 20:08:53 +00:00
timedout
bf200ad12d fix: Resolve compile errors
me and cargo check are oops now
2025-12-31 20:01:29 +00:00
timedout
41e628892d chore: Add news fragment 2025-12-31 20:01:29 +00:00
timedout
44851ee6a2 feat: Fall back to remote room summary if local fails 2025-12-31 20:01:29 +00:00
timedout
a7e6e6e83f feat: Allow local server admins to bypass summary visibility checks

Also improve error messages so they aren't so damn long.
2025-12-31 20:01:29 +00:00
Ginger
8a561fcd3a chore: Clippy fixes 2025-12-31 19:56:35 +00:00
Ginger
25c305f473 chore: Fix comment formatting 2025-12-31 19:56:35 +00:00
Ginger
c900350164 chore: Add news fragment 2025-12-31 19:56:35 +00:00
Ginger
c565e6ffbc feat: Restrict where certain admin commands may be used 2025-12-31 19:56:31 +00:00
Jade Ellis
442f887c98 style: Improve warning regarding admin removal 2025-12-31 19:40:42 +00:00
Terry
03220845e5 docs: Changelog 2025-12-31 19:35:53 +00:00
Terry
f8c1e9bcde feat: Config defined admin list
Closes !1246
2025-12-31 19:35:40 +00:00
Ginger
21324b748f feat: Enable console feature by default 2025-12-31 19:12:25 +00:00
Jade Ellis
b7bf36443b docs: Fix typo 2025-12-31 19:03:22 +00:00
ginger
d72192aa32 fix(ci): Stop using nightly to build Debian packages 2025-12-30 14:23:31 -05:00
Jade Ellis
38ecc41780 chore: Release 2025-12-30 17:45:32 +00:00
Jade Ellis
7ae958bb03 docs: Announcement 2025-12-30 17:35:20 +00:00
Jade Ellis
f676fa53f1 chore: Specify the tag body template 2025-12-30 17:34:44 +00:00
Jade Ellis
978bdc6466 docs: Changelog 2025-12-30 17:34:44 +00:00
timedout
7c741e62cf fix: Forbid creators in power levels 2025-12-30 17:34:43 +00:00
Olivia Lee
12aecf8091 validate membership events returned by remote servers
This fixes a vulnerability where an attacker with a malicious remote
server and a user on the local server can trick the local server into
signing arbitrary events. The attacker issue a remote leave as the local
user to a room on the malicious server. Without any validation of the
make_leave response, the local server would sign the attacker-controlled
event and pass it back to the malicious server with send_leave.

The join and knock endpoints are also fixed in this commit, but are less
useful for exploitation because the local server replaces the "content"
field returned by the remote server. Remote invites are unaffected
because we already check that the event returned from /invite has the
same event ID as the event passed to it.

Co-authored-by: timedout <git@nexy7574.co.uk>
Co-authored-by: Jade Ellis <jade@ellis.link>
Co-authored-by: Ginger <ginger@gingershaped.computer>
2025-12-30 15:24:45 +00:00
Renovate Bot
19372f0b15 chore(deps): update dependency cargo-bins/cargo-binstall to v1.16.6 2025-12-29 23:52:04 +00:00
Jade Ellis
a66b90cb3d ci: Explicitly auto tag latest 2025-12-29 23:45:02 +00:00
Jade Ellis
7234ce6cbe ci: Don't force tag all versions as latest 2025-12-29 23:45:02 +00:00
Jade Ellis
beb0c2ad9a fix(ci): Don't double append latest tag suffix 2025-12-29 23:45:02 +00:00
Jade Ellis
39aaf95d09 docs: Changelog 2025-12-29 23:33:12 +00:00
Jade Ellis
5e0edd5a1c feat: Allow configuring the OTLP protocol 2025-12-29 23:33:12 +00:00
Jade Ellis
d180f5a759 feat: Split otlp exporter into a new, enabled-by-default feature 2025-12-29 23:33:12 +00:00
Jade Ellis
f163264a82 docs: Update example domains 2025-12-29 23:33:12 +00:00
timedout
5e7bc590d2 chore: Apply suggestions 2025-12-29 23:30:49 +00:00
timedout
08df35946b fix: File -> line 2025-12-29 23:30:49 +00:00
timedout
c4ebf289fa fix: Dead link to code style doc 2025-12-29 23:30:49 +00:00
timedout
1fc6010f9a fix: Issue title -> pull request title 2025-12-29 23:30:49 +00:00
timedout
1d91331275 fix: Stray whitespace 2025-12-29 23:30:49 +00:00
timedout
77e62ad772 feat: Add pull request template 2025-12-29 23:30:49 +00:00
timedout
696a1e6a4d docs: Add information on writing changelog fragments 2025-12-28 00:59:31 +00:00
timedout
f41bbd7361 feat(meta): Set up towncrier 2025-12-28 00:53:44 +00:00
timedout
7350266c80 fix: Don't allow admin room upgrades and fix power levels during upgrade 2025-12-27 04:05:26 +00:00
Julian Anderson
322c0900c6 docs: handle traefik >=3.6.3 "encoded characters" 2025-12-24 22:40:50 -05:00
timedout
1237e60aaf Revert "feat(ci): Allow running manual workflows against specific commits"
This reverts commit 9b4845bf8d.
2025-12-22 13:45:45 +00:00
timedout
9b4845bf8d feat(ci): Allow running manual workflows against specific commits 2025-12-22 13:29:40 +00:00
aviac
fb5b515f96 chore: update flake lock 2025-12-22 04:11:41 +00:00
Jade Ellis
e6336d694a chore: Fix escape 2025-12-22 02:42:21 +00:00
Jade Ellis
b7841280d9 chore: Security announcement 2025-12-22 02:36:31 +00:00
Jade Ellis
f4ccb81913 chore: Release 2025-12-22 00:23:20 +00:00
Jade Ellis
710cdfeadb chore: Update mailmap 2025-12-21 20:34:11 +00:00
Jade Ellis
666849ea87 chore(ci): Unify artifact versions 2025-12-21 19:11:12 +00:00
Jade Ellis
71094803f1 fix(ci): Try use path that exists 2025-12-21 18:50:48 +00:00
Jade Ellis
bf91ce5c7f feat: Mark v12 as stable 2025-12-21 17:15:16 +00:00
Jade Ellis
8fd15f26ce style: Fix clippy 2025-12-21 17:12:36 +00:00
Jade Ellis
705fa6c5c6 fix: Simplify visibility check code 2025-12-21 17:12:36 +00:00
Jade Ellis
6f67c27538 fix: Ensure that room ID is present on state events sent to client
routes

Mostly fixes !1094

The remaining issue is federation routes
2025-12-21 17:12:35 +00:00
Jade Ellis
8586d747d1 feat: Run visibility checks on bundled relations 2025-12-21 17:12:35 +00:00
Jade Ellis
11012a9ce1 fix: Always return the same 404 message in context 2025-12-21 17:12:35 +00:00
Jade Ellis
07be190507 fix: Return 404 when event is not accessible 2025-12-21 17:12:35 +00:00
Jade Ellis
ae4acc9568 fix: Don't incorrectly add thread root to relation response 2025-12-21 17:12:35 +00:00
Jade Ellis
f83ddecd8c refactor(perf): Push down visibility check after limit 2025-12-21 17:12:34 +00:00
Jade Ellis
dd87232f1f refactor: Reduce database lookups in some cases 2025-12-21 17:12:34 +00:00
Jade Ellis
8e33f9a7d0 refactor: Improve code style for bundled aggregations 2025-12-21 17:12:34 +00:00
Jade Ellis
8d3e4eba99 fix: Add aggregations to the search endpoint 2025-12-21 17:12:34 +00:00
Jade Ellis
96bfdb97da fix: Filter out invalid replacements from bundled aggregations 2025-12-21 17:12:34 +00:00
Jade Ellis
b61010da47 feat: Add bundled aggregations support
Add support for the m.replace and m.reference bundled
aggregations.
This should fix plenty of subtle client issues.
Threads are not included in the new code as they have
historically been written to the database. Replacing the
old system would result in issues when switching away from
continuwuity, so saved for later.
Some TODOs have been left re event visibility and ignored users.
These should be OK for now, though.
2025-12-21 17:12:34 +00:00
Jade Ellis
987c5eeb03 refactor: Promote handling unsigned data out of timeline
Also fixes:
- Transaction IDs leaking in event route
- Age not being set for event relations or threads
- Both of the above for search results

Notes down concern with relations table
2025-12-21 17:12:33 +00:00
timedout
7fa4fa9862 fix: Also check sender origin 2025-12-21 10:58:50 +00:00
timedout
b2bead67ac fix: Apply additional validation to invites 2025-12-21 10:10:54 +00:00
timedout
48a6a475ce fix: Omit children with invalid state from space summary 2025-12-18 19:48:58 +00:00
timedout
86450da705 style: Run clippy 2025-12-18 19:48:26 +00:00
timedout
8538b21860 feat: Check for incoming signatures 2025-12-18 19:03:32 +00:00
timedout
63e4aacd2b style: Reword TODO comment 2025-12-18 18:24:00 +00:00
timedout
72f0eb9493 feat: Fetch policy server signatures 2025-12-18 18:23:54 +00:00
Odd E. Ebbesen
867d0ab671 fix(reload): Store paths to config files for admin reload
Paths given via --config at startup are now stored inside the config
struct at runtime, to make it possible to reload config without setting
an env var for the config file location.
2025-12-16 14:58:33 +00:00
Ginger
64e187e5b4 fix: Update comment in src/core/config/mod.rs 2025-12-16 14:19:43 +00:00
aviac
5dc449a87a test: add test for config with default_room_version
This commit refactors the test a bit to run the basic test script with
different configs. Currently we have two configs we test:

- the bare minimum to make it run (base)
- base + default_room_version set to "12"
2025-12-16 14:19:43 +00:00
aviac
f5fda01013 docs: Add note about the type of the default_room_version option 2025-12-16 14:19:43 +00:00
99 changed files with 2410 additions and 719 deletions

View File

@@ -32,11 +32,13 @@ outputs:
runs:
using: composite
steps:
- run: mkdir -p digests
shell: bash
- name: Download digests
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/download-artifact@v4
with:
path: /tmp/digests
path: digests
pattern: ${{ inputs.digest_pattern }}
merge-multiple: true
@@ -62,6 +64,7 @@ runs:
uses: docker/metadata-action@v5
with:
flavor: |
latest=auto
suffix=${{ inputs.tag_suffix }},onlatest=true
tags: |
type=semver,pattern={{version}},prefix=v
@@ -70,7 +73,6 @@ runs:
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},
type=ref,event=pr
type=sha,format=short
type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }},priority=1100
images: ${{ inputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:
@@ -78,7 +80,7 @@ runs:
- name: Create manifest list and push
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
working-directory: /tmp/digests
working-directory: digests
shell: bash
env:
IMAGES: ${{ inputs.images }}

View File

@@ -54,7 +54,7 @@ runs:
run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
- name: Upload binary artifact
uses: forgejo/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
path: /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
@@ -62,7 +62,7 @@ runs:
- name: Upload digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: digests${{ inputs.digest_suffix }}-${{ inputs.slug }}${{ inputs.cpu_suffix }}
path: /tmp/digests/*

View File

@@ -0,0 +1,82 @@
---
name: 'New pull request'
about: 'Open a new pull request to contribute to continuwuity'
ref: 'main'
---
<!--
In order to help reviewers know what your pull request does at a glance, you should ensure that
1. Your PR title is a short, single sentence describing what you changed
2. You have described in more detail what you have changed, why you have changed it, what the
intended effect is, and why you think this will be beneficial to the project.
If you have made any potentially strange/questionable design choices, but didn't feel they'd benefit
from code comments, please don't mention them here - after opening your pull request,
go to "files changed", and click on the "+" symbol in the line number gutter,
and attach comments to the lines that you think would benefit from some clarification.
-->
This pull request...
<!-- Example:
This pull request allows us to warp through time and space ten times faster than before by
double-inverting the warp drive with hyperheated jump fluid, both making the drive faster and more
efficient. This resolves the common issue where we have to wait more than 10 milliseconds to
engage, use, and disengage the warp drive when travelling between galaxies.
-->
<!-- Closes: #... -->
<!-- Fixes: #... -->
<!-- Uncomment the above line(s) if your pull request fixes an issue or closes another pull request
by superseding it. Replace `#...` with the issue/pr number, such as `#123`. -->
**Pull request checklist:**
<!-- You need to complete these before your PR can be considered.
If you aren't sure about some, feel free to ask for clarification in #dev:continuwuity.org. -->
- [ ] This pull request targets the `main` branch, and the branch is named something other than
`main`.
- [ ] I have written an appropriate pull request title and my description is clear.
- [ ] I understand I am responsible for the contents of this pull request.
- I have followed the [contributing guidelines][c1]:
- [ ] My contribution follows the [code style][c2], if applicable.
- [ ] I ran [pre-commit checks][c1pc] before opening/drafting this pull request.
- [ ] I have [tested my contribution][c1t] (or proof-read it for documentation-only changes)
myself, if applicable. This includes ensuring code compiles.
- [ ] My commit messages follow the [commit message format][c1cm] and are descriptive.
- [ ] I have written a [news fragment][n1] for this PR, if applicable<!--(can be done after hitting open!)-->.
<!--
Notes on these requirements:
- While not required, we encourage you to sign your commits with GPG or SSH to attest the
authenticity of your changes.
- While we allow LLM-assisted contributions, we do not appreciate contributions that are
low quality, which is typical of machine-generated contributions that have not had a lot of love
and care from a human. Please do not open a PR if all you have done is asked ChatGPT to tidy up
the codebase with a +-100,000 diff.
- In the case of code style violations, reviewers may leave review comments/change requests
indicating what the ideal change would look like. For example, a reviewer may suggest you lower
a log level, or use `match` instead of `if/else` etc.
- In the case of code style violations, pre-commit check failures, minor things like typos/spelling
errors, and in some cases commit format violations, reviewers may modify your branch directly,
typically by making changes and adding a commit. Particularly in the latter case, a reviewer may
rebase your commits to squash "spammy" ones (like "fix", "fix", "actually fix"), and reword
commit messages that don't satisfy the format.
- Pull requests MUST pass the `Checks` CI workflows to be capable of being merged. This can only be
bypassed in exceptional circumstances.
If your CI flakes, let us know in matrix:r/dev:continuwuity.org.
- Pull requests have to be based on the latest `main` commit before being merged. If the main branch
changes while you're making your changes, you should make sure you rebase on main before
opening a PR. Your branch will be rebased on main before it is merged if it has fallen behind.
- We typically only do fast-forward merges, so your entire commit log will be included. Once in
main, it's difficult to get out cleanly, so put on your best dress, smile for the cameras!
-->
[c1]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md
[c2]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/docs/development/code_style.mdx
[c1pc]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#pre-commit-checks
[c1t]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#running-tests-locally
[c1cm]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#commit-messages
[n1]: https://towncrier.readthedocs.io/en/stable/tutorial.html#creating-news-fragments

View File

@@ -59,10 +59,9 @@ jobs:
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Setup Rust nightly
- name: Setup Rust
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Get package version and component
@@ -127,7 +126,7 @@ jobs:
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
- name: Upload deb artifact
uses: actions/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: continuwuity-${{ steps.debian-version.outputs.distribution }}
path: ${{ steps.cargo-deb.outputs.path }}

View File

@@ -239,13 +239,13 @@ jobs:
cp $BIN_RPM upload-bin/
- name: Upload binary RPM
uses: actions/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: continuwuity
path: upload-bin/
- name: Upload debug RPM artifact
uses: actions/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: continuwuity-debug
path: artifacts/*debuginfo*.rpm

View File

@@ -109,7 +109,7 @@ jobs:
cat ./element-web/webapp/config.json
- name: 📤 Upload Artifact
uses: forgejo/upload-artifact@v3
uses: forgejo/upload-artifact@v4
with:
name: element-web
path: ./element-web/webapp/

View File

@@ -2,6 +2,7 @@ AlexPewMaster <git@alex.unbox.at> <68469103+AlexPewMaster@users.noreply.github.c
Daniel Wiesenberg <weasy@hotmail.de> <weasy666@gmail.com>
Devin Ragotzy <devin.ragotzy@gmail.com> <d6ragotzy@wmich.edu>
Devin Ragotzy <devin.ragotzy@gmail.com> <dragotzy7460@mail.kvcc.edu>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
Jonas Platte <jplatte+git@posteo.de> <jplatte+gitlab@posteo.de>
Jonas Zohren <git-pbkyr@jzohren.de> <gitlab-jfowl-0ux98@sh14.de>
Jonathan de Jong <jonathan@automatia.nl> <jonathandejong02@gmail.com>
@@ -12,5 +13,6 @@ Olivia Lee <olivia@computer.surgery> <benjamin@computer.surgery>
Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
nexy7574 <git@nexy7574.co.uk> <nex@noreply.forgejo.ellis.link>
nexy7574 <git@nexy7574.co.uk> <nex@noreply.localhost>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>

View File

@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.40.0
rev: v1.41.0
hooks:
- id: typos
- id: typos
@@ -31,7 +31,7 @@ repos:
stages: [commit-msg]
- repo: https://github.com/crate-ci/committed
rev: v1.1.8
rev: v1.1.9
hooks:
- id: committed

View File

@@ -24,3 +24,4 @@ extend-ignore-re = [
"continuwuity" = "continuwuity"
"continuwity" = "continuwuity"
"execuse" = "execuse"
"oltp" = "OTLP"

12
CHANGELOG.md Normal file
View File

@@ -0,0 +1,12 @@
# Continuwuity 0.5.0 (2025-12-30)
**This release contains a CRITICAL vulnerability patch, and you must update as soon as possible**
## Features
- Enabled the OTLP exporter in default builds, and allow configuring the exporter protocol. (@Jade) (#1251)
## Bug Fixes
- Don't allow admin room upgrades, as this can break the admin room (@timedout) (#1245)
- Fix invalid creators in power levels during upgrade to v12 (@timedout) (#1245)

48
Cargo.lock generated
View File

@@ -940,7 +940,7 @@ dependencies = [
[[package]]
name = "conduwuit"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"clap",
"conduwuit_admin",
@@ -972,7 +972,7 @@ dependencies = [
[[package]]
name = "conduwuit_admin"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"clap",
"conduwuit_api",
@@ -994,7 +994,7 @@ dependencies = [
[[package]]
name = "conduwuit_api"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"async-trait",
"axum 0.7.9",
@@ -1027,14 +1027,14 @@ dependencies = [
[[package]]
name = "conduwuit_build_metadata"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"built",
]
[[package]]
name = "conduwuit_core"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"argon2",
"arrayvec",
@@ -1095,7 +1095,7 @@ dependencies = [
[[package]]
name = "conduwuit_database"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"async-channel",
"conduwuit_core",
@@ -1114,7 +1114,7 @@ dependencies = [
[[package]]
name = "conduwuit_macros"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"itertools 0.14.0",
"proc-macro2",
@@ -1124,7 +1124,7 @@ dependencies = [
[[package]]
name = "conduwuit_router"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"axum 0.7.9",
"axum-client-ip",
@@ -1159,7 +1159,7 @@ dependencies = [
[[package]]
name = "conduwuit_service"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -1200,7 +1200,7 @@ dependencies = [
[[package]]
name = "conduwuit_web"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"askama",
"axum 0.7.9",
@@ -3305,6 +3305,8 @@ dependencies = [
"prost",
"reqwest",
"thiserror 2.0.17",
"tokio",
"tonic",
"tracing",
]
@@ -4063,7 +4065,7 @@ checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
[[package]]
name = "ruma"
version = "0.10.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"assign",
"js_int",
@@ -4083,7 +4085,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.10.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"js_int",
"ruma-common",
@@ -4095,7 +4097,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.18.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"as_variant",
"assign",
@@ -4118,7 +4120,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"as_variant",
"base64 0.22.1",
@@ -4150,7 +4152,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.28.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"as_variant",
"indexmap",
@@ -4175,7 +4177,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"bytes",
"headers",
@@ -4197,7 +4199,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.9.5"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"js_int",
"thiserror 2.0.17",
@@ -4206,7 +4208,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"js_int",
"ruma-common",
@@ -4216,7 +4218,7 @@ dependencies = [
[[package]]
name = "ruma-macros"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"cfg-if",
"proc-macro-crate",
@@ -4231,7 +4233,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"js_int",
"ruma-common",
@@ -4243,7 +4245,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.15.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=50b2a91b2ab8f9830eea80b9911e11234e0eac66#50b2a91b2ab8f9830eea80b9911e11234e0eac66"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=27abe0dcd33fd4056efc94bab3582646b31b6ce9#27abe0dcd33fd4056efc94bab3582646b31b6ce9"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",
@@ -6204,7 +6206,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"clap",
"serde",
@@ -6213,7 +6215,7 @@ dependencies = [
[[package]]
name = "xtask-generate-commands"
version = "0.5.0-rc.8.1"
version = "0.5.1"
dependencies = [
"clap-markdown",
"clap_builder",

View File

@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0-rc.8.1"
version = "0.5.1"
[workspace.metadata.crane]
name = "conduwuit"
@@ -351,7 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
rev = "50b2a91b2ab8f9830eea80b9911e11234e0eac66"
rev = "27abe0dcd33fd4056efc94bab3582646b31b6ce9"
features = [
"compat",
"rand",
@@ -426,7 +426,7 @@ features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.31.0"
features = ["http", "trace", "logs", "metrics"]
features = ["http", "grpc-tonic", "trace", "logs", "metrics"]

View File

@@ -0,0 +1 @@
The `console` feature is now enabled by default, allowing the server console to be used for running admin commands directly.

View File

@@ -0,0 +1 @@
Certain potentially dangerous admin commands are now restricted to only be usable in the admin room and server console.

1
changelog.d/1253.feature Normal file
View File

@@ -0,0 +1 @@
Implemented a configuration defined admin list independent of the admin room. (@Terryiscool160).

1
changelog.d/1257.bugfix Normal file
View File

@@ -0,0 +1 @@
Fixed unreliable room summary fetching and improved error messages. Contributed by @nex.

2
changelog.d/1261.bugfix Normal file
View File

@@ -0,0 +1,2 @@
Client requested timeout parameter is now applied to e2ee key lookups and claims. Related federation requests are now
also concurrent. Contributed by @nex.

View File

@@ -26,8 +26,8 @@
# Also see the `[global.well_known]` config section at the very bottom.
#
# Examples of delegation:
# - https://puppygock.gay/.well-known/matrix/server
# - https://puppygock.gay/.well-known/matrix/client
# - https://continuwuity.org/.well-known/matrix/server
# - https://continuwuity.org/.well-known/matrix/client
#
# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
# WIPE.
@@ -389,7 +389,15 @@
#
#appservice_idle_timeout = 300
# Notification gateway pusher idle connection pool timeout.
# Notification gateway pusher request connection timeout (seconds).
#
#pusher_conn_timeout = 15
# Notification gateway pusher total request timeout (seconds).
#
#pusher_timeout = 60
# Notification gateway pusher idle connection pool timeout (seconds).
#
#pusher_idle_timeout = 15
@@ -586,10 +594,13 @@
#allow_unstable_room_versions = true
# Default room version continuwuity will create rooms with.
# Note that this has to be a string since the room version is a string
# rather than an integer. Forgetting the quotes will make the server fail
# to start!
#
# Per spec, room version 11 is the default.
# Per spec, room version "11" is the default.
#
#default_room_version = 11
#default_room_version = "11"
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
@@ -605,6 +616,11 @@
#
#otlp_filter = "info"
# Protocol to use for OTLP tracing export. Options are "http" or "grpc".
# The HTTP protocol uses port 4318 by default, while gRPC uses port 4317.
#
#otlp_protocol = "http"
# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
# tracing_flame. The resulting profile can be visualized with inferno[1],
@@ -1447,6 +1463,11 @@
#
#url_preview_max_spider_size = 256000
# Total request timeout for URL previews (seconds). This includes
# connection, request, and response body reading time.
#
#url_preview_timeout = 120
# Option to decide whether you would like to run the domain allowlist
# checks (contains and explicit) on the root domain or not. Does not apply
# to URL contains allowlist. Defaults to false.
@@ -1530,7 +1551,7 @@
# a normal continuwuity admin command. The reply will be publicly visible
# to the room, originating from the sender.
#
# example: \\!admin debug ping puppygock.gay
# example: \\!admin debug ping continuwuity.org
#
#admin_escape_commands = true
@@ -1548,7 +1569,8 @@
# For example: `./continuwuity --execute "server admin-notice continuwuity
# has started up at $(date)"`
#
# example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]`
# example: admin_execute = ["debug ping continuwuity.org", "debug echo
# hi"]`
#
#admin_execute = []
@@ -1581,6 +1603,18 @@
#
#admin_room_tag = "m.server_notice"
# A list of Matrix IDs that are qualified as server admins.
#
# Any Matrix IDs within this list are regarded as an admin
# regardless of whether they are in the admin room or not
#
#admins_list = []
# Defines whether those within the admin room are added to the
# admins_list.
#
#admins_from_room = true
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
# This is NOT enabled by default.
#

View File

@@ -48,7 +48,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.2
ENV BINSTALL_VERSION=1.16.6
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.2
ENV BINSTALL_VERSION=1.16.6
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -114,6 +114,10 @@ services:
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_HTTPCHALLENGE_ENTRYPOINT: web
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_STORAGE: "/etc/traefik/acme/acme.json"
# Since Traefik 3.6.3, paths with certain "encoded characters" are now blocked by default; we need a couple, or else things *will* break
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_ENCODEDCHARACTERS_ALLOWENCODEDSLASH: true
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_ENCODEDCHARACTERS_ALLOWENCODEDHASH: true
TRAEFIK_PROVIDERS_DOCKER: true
TRAEFIK_PROVIDERS_DOCKER_ENDPOINT: "unix:///var/run/docker.sock"
TRAEFIK_PROVIDERS_DOCKER_EXPOSEDBYDEFAULT: false

View File

@@ -149,11 +149,12 @@ ### Creating pull requests
*looks* done.
Before submitting a pull request, please ensure:
1. Your code passes all CI checks (formatting, linting, typo detection, etc.)
1. Your code passes all CI checks (formatting, linting, typo detection, etc.). Run `pre-commit` locally to verify this.
2. Your code follows the [code style guide](./code_style)
3. Your commit messages follow the conventional commits format
4. Tests are added for new functionality
5. Documentation is updated if needed
6. You have written a [news fragment](#writing-news-fragments) for your changes
Direct all PRs/MRs to the `main` branch.
@@ -171,3 +172,32 @@ ### Creating pull requests
[sytest]: https://github.com/matrix-org/sytest/
[mdbook]: https://rust-lang.github.io/mdBook/
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
#### Writing news fragments
In order to make writing our changelogs easier, we make use of [Towncrier]. Towncrier builds changelogs based on
"news fragments", which are little markdown files in the `changelog.d/` directory that describe individual changes.
When you make a pull request that changes functionality, fixes a bug, or adds documentation, please add a news fragment
describing your change. The file name *MUST* be in the format of `{pull_request_number}.{type}`, where `{type}` is one
of the following:
- `feature` - for new features
- `bugfix` - for bug fixes
- `doc` - for documentation changes
- `misc` - for other changes that don't fit the above categories
For example:
```bash
$ echo "Fixed the quantum flux stabiliser. Contributed by @alice." > changelog.d/42.bugfix
```
(Note: If you want to credit yourself, you should reference your Forgejo handle; however, links to other platforms are also acceptable.)
When the next release is made, Towncrier will automatically include your news fragment in the changelog.
You can read more about writing news fragments in the [Towncrier tutorial][tt].
[Towncrier]: https://towncrier.readthedocs.io/
[tt]: https://towncrier.readthedocs.io/en/stable/tutorial.html#creating-news-fragments

View File

@@ -6,12 +6,10 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
},
{
"id": 5,
"message": "It's a bird! It's a plane! No, it's 0.5.0-rc.8.1!\n\nThis is a minor bugfix update to the rc8 which backports some important fixes from the latest main branch. If you still haven't updated to rc8, you should skip to main. Otherwise, you should upgrade to this bugfix release as soon as possible.\n\nBugfixes backported to this version:\n\n- Resolved several issues with state resolution v2.1 (room version 12)\n- Fixed issues with the `restricted` and `knock_restricted` join rules that would sometimes incorrectly disallow a valid join\n- Fixed the automatic support contact listing being a no-op\n- Fixed upgrading pre-v12 rooms to v12 rooms\n- Fixed policy servers sending the incorrect JSON objects (resulted in false positives)\n- Fixed debug build panic during MSC4133 migration\n\nIt is recommended, if you can and are comfortable with doing so, following updates to the main branch - we're in the run up to the full 0.5.0 release, and more and more bugfixes and new features are being pushed constantly. Please don't forget to join [#announcements:continuwuity.org](https://matrix.to/#/#announcements:continuwuity.org) to receive this news faster and be alerted to other important updates!"
"id": 7,
"mention_room": true,
"date": "2025-12-30",
"message": "Continuwuity v0.5.1 has been released. **The release contains a fix for the critical vulnerability [GHSA-m5p2-vccg-8c9v](https://github.com/continuwuity/continuwuity/security/advisories/GHSA-m5p2-vccg-8c9v) (embargoed) affecting all Conduit-derived servers. Update as soon as possible.**\n\nThis has been *actively exploited* to attempt account takeover and forge events bricking the Continuwuity rooms. The new space is accessible at [Continuwuity (room list)](https://matrix.to/#/!8cR4g-i9ucof69E4JHNg9LbPVkGprHb3SzcrGBDDJgk?via=continuwuity.org&via=starstruck.systems&via=gingershaped.computer)\n"
}
]
}

54
flake.lock generated
View File

@@ -3,11 +3,11 @@
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1761112158,
"narHash": "sha256-RIXu/7eyKpQHjsPuAUODO81I4ni8f+WYSb7K4mTG6+0=",
"lastModified": 1766324728,
"narHash": "sha256-9C+WyE5U3y5w4WQXxmb0ylRyMMsPyzxielWXSHrcDpE=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "58f3aaec0e1776f4a900737be8cd7cb00972210d",
"rev": "c88b88c62bda077be8aa621d4e89d8701e39cb5d",
"type": "github"
},
"original": {
@@ -18,11 +18,11 @@
},
"crane": {
"locked": {
"lastModified": 1760924934,
"narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=",
"lastModified": 1766194365,
"narHash": "sha256-4AFsUZ0kl6MXSm4BaQgItD0VGlEKR3iq7gIaL7TjBvc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "c6b4d5308293d0d04fcfeee92705017537cad02f",
"rev": "7d8ec2c71771937ab99790b45e6d9b93d15d9379",
"type": "github"
},
"original": {
@@ -39,11 +39,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1761115517,
"narHash": "sha256-Fev/ag/c3Fp3JBwHfup3lpA5FlNXfkoshnQ7dssBgJ0=",
"lastModified": 1766299592,
"narHash": "sha256-7u+q5hexu2eAxL2VjhskHvaUKg+GexmelIR2ve9Nbb4=",
"owner": "nix-community",
"repo": "fenix",
"rev": "320433651636186ea32b387cff05d6bbfa30cea7",
"rev": "381579dee168d5ced412e2990e9637ecc7cf1c5d",
"type": "github"
},
"original": {
@@ -55,11 +55,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1765121682,
"narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
"type": "github"
},
"original": {
@@ -74,11 +74,11 @@
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1760948891,
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
"lastModified": 1765835352,
"narHash": "sha256-XswHlK/Qtjasvhd1nOa1e8MgZ8GS//jBoTqWtrS1Giw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
"rev": "a34fae9c08a15ad73f295041fec82323541400a9",
"type": "github"
},
"original": {
@@ -89,11 +89,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1760878510,
"narHash": "sha256-K5Osef2qexezUfs0alLvZ7nQFTGS9DL2oTVsIXsqLgs=",
"lastModified": 1766070988,
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5e2a59a5b1a82f89f2c7e598302a9cacebb72a67",
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
"type": "github"
},
"original": {
@@ -105,11 +105,11 @@
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1754788789,
"narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=",
"lastModified": 1765674936,
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "a73b9c743612e4244d865a2fdee11865283c04e6",
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
"type": "github"
},
"original": {
@@ -132,11 +132,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1761077270,
"narHash": "sha256-O1uTuvI/rUlubJ8AXKyzh1WSWV3qCZX0huTFUvWLN4E=",
"lastModified": 1766253897,
"narHash": "sha256-ChK07B1aOlJ4QzWXpJo+y8IGAxp1V9yQ2YloJ+RgHRw=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "39990a923c8bca38f5bd29dc4c96e20ee7808d5d",
"rev": "765b7bdb432b3740f2d564afccfae831d5a972e4",
"type": "github"
},
"original": {
@@ -153,11 +153,11 @@
]
},
"locked": {
"lastModified": 1760945191,
"narHash": "sha256-ZRVs8UqikBa4Ki3X4KCnMBtBW0ux1DaT35tgsnB1jM4=",
"lastModified": 1766000401,
"narHash": "sha256-+cqN4PJz9y0JQXfAK5J1drd0U05D5fcAGhzhfVrDlsI=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "f56b1934f5f8fcab8deb5d38d42fd692632b47c2",
"rev": "42d96e75aa56a3f70cab7e7dc4a32868db28e8fd",
"type": "github"
},
"original": {

View File

@@ -6,6 +6,69 @@
pkgs,
...
}:
let
baseTestScript =
pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; }
''
import asyncio
import nio
async def main() -> None:
# Connect to continuwuity
client = nio.AsyncClient("http://continuwuity:6167", "alice")
# Register as user alice
response = await client.register("alice", "my-secret-password")
# Log in as user alice
response = await client.login("my-secret-password")
# Create a new room
response = await client.room_create(federate=False)
print("Matrix room create response:", response)
assert isinstance(response, nio.RoomCreateResponse)
room_id = response.room_id
# Join the room
response = await client.join(room_id)
print("Matrix join response:", response)
assert isinstance(response, nio.JoinResponse)
# Send a message to the room
response = await client.room_send(
room_id=room_id,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": "Hello continuwuity!"
}
)
print("Matrix room send response:", response)
assert isinstance(response, nio.RoomSendResponse)
# Sync responses
response = await client.sync(timeout=30000)
print("Matrix sync response:", response)
assert isinstance(response, nio.SyncResponse)
# Check the message was received by continuwuity
last_message = response.rooms.join[room_id].timeline.events[-1].body
assert last_message == "Hello continuwuity!"
# Leave the room
response = await client.room_leave(room_id)
print("Matrix room leave response:", response)
assert isinstance(response, nio.RoomLeaveResponse)
# Close the client
await client.close()
if __name__ == "__main__":
asyncio.run(main())
'';
in
{
# run some nixos tests as checks
checks = lib.pipe self'.packages [
@@ -18,106 +81,69 @@
# this test was initially yoinked from
#
# https://github.com/NixOS/nixpkgs/blob/960ce26339661b1b69c6f12b9063ca51b688615f/nixos/tests/matrix/continuwuity.nix
(builtins.map (name: {
name = "test-${name}";
value = pkgs.testers.runNixOSTest {
inherit name;
(builtins.concatMap (
name:
builtins.map
(
{ config, suffix }:
{
name = "test-${name}-${suffix}";
value = pkgs.testers.runNixOSTest {
inherit name;
nodes = {
continuwuity = {
services.matrix-continuwuity = {
enable = true;
package = self'.packages.${name};
settings.global = {
nodes = {
continuwuity = {
services.matrix-continuwuity = {
enable = true;
package = self'.packages.${name};
settings = config;
extraEnvironment.RUST_BACKTRACE = "yes";
};
networking.firewall.allowedTCPPorts = [ 6167 ];
};
client.environment.systemPackages = [ baseTestScript ];
};
testScript = ''
start_all()
with subtest("start continuwuity"):
continuwuity.wait_for_unit("continuwuity.service")
continuwuity.wait_for_open_port(6167)
with subtest("ensure messages can be exchanged"):
client.succeed("${lib.getExe baseTestScript} >&2")
'';
};
}
)
[
{
suffix = "base";
config = {
global = {
server_name = name;
address = [ "0.0.0.0" ];
allow_registration = true;
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true;
};
extraEnvironment.RUST_BACKTRACE = "yes";
};
networking.firewall.allowedTCPPorts = [ 6167 ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
import asyncio
import nio
async def main() -> None:
# Connect to continuwuity
client = nio.AsyncClient("http://continuwuity:6167", "alice")
# Register as user alice
response = await client.register("alice", "my-secret-password")
# Log in as user alice
response = await client.login("my-secret-password")
# Create a new room
response = await client.room_create(federate=False)
print("Matrix room create response:", response)
assert isinstance(response, nio.RoomCreateResponse)
room_id = response.room_id
# Join the room
response = await client.join(room_id)
print("Matrix join response:", response)
assert isinstance(response, nio.JoinResponse)
# Send a message to the room
response = await client.room_send(
room_id=room_id,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": "Hello continuwuity!"
}
)
print("Matrix room send response:", response)
assert isinstance(response, nio.RoomSendResponse)
# Sync responses
response = await client.sync(timeout=30000)
print("Matrix sync response:", response)
assert isinstance(response, nio.SyncResponse)
# Check the message was received by continuwuity
last_message = response.rooms.join[room_id].timeline.events[-1].body
assert last_message == "Hello continuwuity!"
# Leave the room
response = await client.room_leave(room_id)
print("Matrix room leave response:", response)
assert isinstance(response, nio.RoomLeaveResponse)
# Close the client
await client.close()
if __name__ == "__main__":
asyncio.run(main())
'')
];
}
{
suffix = "with-room-version";
config = {
global = {
server_name = name;
address = [ "0.0.0.0" ];
allow_registration = true;
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true;
default_room_version = "12";
};
};
};
testScript = ''
start_all()
with subtest("start continuwuity"):
continuwuity.wait_for_unit("continuwuity.service")
continuwuity.wait_for_open_port(6167)
with subtest("ensure messages can be exchanged"):
client.succeed("do_test >&2")
'';
};
}))
}
]
))
builtins.listToAttrs
];
};

1
release.toml Normal file
View File

@@ -0,0 +1 @@
tag-message = "chore: Release v{{version}}"

View File

@@ -53,14 +53,26 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res
use AdminCommand::*;
match command {
| Appservices(command) => appservice::process(command, context).await,
| Appservices(command) => {
// appservice commands are all restricted
context.bail_restricted()?;
appservice::process(command, context).await
},
| Media(command) => media::process(command, context).await,
| Users(command) => user::process(command, context).await,
| Users(command) => {
// user commands are all restricted
context.bail_restricted()?;
user::process(command, context).await
},
| Rooms(command) => room::process(command, context).await,
| Federation(command) => federation::process(command, context).await,
| Server(command) => server::process(command, context).await,
| Debug(command) => debug::process(command, context).await,
| Query(command) => query::process(command, context).await,
| Query(command) => {
// query commands are all restricted
context.bail_restricted()?;
query::process(command, context).await
},
| Check(command) => check::process(command, context).await,
}
}

View File

@@ -1,6 +1,6 @@
use std::{fmt, time::SystemTime};
use conduwuit::Result;
use conduwuit::{Err, Result};
use conduwuit_service::Services;
use futures::{
Future, FutureExt, TryFutureExt,
@@ -8,6 +8,7 @@
lock::Mutex,
};
use ruma::{EventId, UserId};
use service::admin::InvocationSource;
pub(crate) struct Context<'a> {
pub(crate) services: &'a Services,
@@ -16,6 +17,7 @@ pub(crate) struct Context<'a> {
pub(crate) reply_id: Option<&'a EventId>,
pub(crate) sender: Option<&'a UserId>,
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
pub(crate) source: InvocationSource,
}
impl Context<'_> {
@@ -43,4 +45,22 @@ pub(crate) fn sender_or_service_user(&self) -> &UserId {
self.sender
.unwrap_or_else(|| self.services.globals.server_user.as_ref())
}
/// Guard for restricted admin commands.
///
/// Succeeds only when this context's [`Self::source`] permits restricted
/// commands to be executed; otherwise returns an error explaining where
/// the command may be used.
///
/// Intended to be the first statement of a restricted command's
/// implementation:
///
/// ```ignore
/// self.bail_restricted()?;
/// // actual command impl
/// ```
pub(crate) fn bail_restricted(&self) -> Result {
	if !self.source.allows_restricted() {
		return Err!("This command can only be used in the admin room.");
	}

	Ok(())
}
}

View File

@@ -291,6 +291,8 @@ pub(super) async fn get_remote_pdu(
#[admin_command]
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
self.bail_restricted()?;
let room_id = self.services.rooms.alias.resolve(&room).await?;
let room_state: Vec<Raw<AnyStateEvent>> = self
.services
@@ -417,27 +419,6 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
Err!("No log level was specified.")
}
#[admin_command]
pub(super) async fn sign_json(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Err!("Expected code block in command body. Add --help for details.");
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json: {e}"),
| Ok(mut value) => {
self.services.server_keys.sign_json(&mut value)?;
let json_text = serde_json::to_string_pretty(&value)?;
write!(self, "{json_text}")
},
}
.await
}
#[admin_command]
pub(super) async fn verify_json(&self) -> Result {
if self.body.len() < 2
@@ -477,6 +458,8 @@ pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms
@@ -502,6 +485,8 @@ pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms
@@ -532,6 +517,8 @@ pub(super) async fn force_set_room_state_from_server(
server_name: OwnedServerName,
at_event: Option<OwnedEventId>,
) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms

View File

@@ -47,9 +47,9 @@ pub enum DebugCommand {
shorteventid: ShortEventId,
},
/// - Attempts to retrieve a PDU from a remote server. Inserts it into our
/// database/timeline if found and we do not have this PDU already
/// (following normal event auth rules, handles it as an incoming PDU).
/// - Attempts to retrieve a PDU from a remote server. **Does not** insert
/// it into the database
/// or persist it anywhere.
GetRemotePdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: OwnedEventId,
@@ -125,12 +125,6 @@ pub enum DebugCommand {
reset: bool,
},
/// - Sign JSON blob
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
SignJson,
/// - Verify JSON signatures
///
/// This command needs a JSON blob provided in a Markdown code block below

View File

@@ -8,12 +8,14 @@
/// Disables the given room server-side.
///
/// Restricted command: bails unless this context's invocation source
/// allows restricted commands (see `bail_restricted`). Replies with a
/// confirmation message on success.
#[admin_command]
pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result {
// Restricted commands may only run from permitted invocation sources
// (e.g. the admin room).
self.bail_restricted()?;
// `true` = set the room's disabled flag (cf. enable_room, which passes
// `false`).
self.services.rooms.metadata.disable_room(&room_id, true);
self.write_str("Room disabled.").await
}
/// Re-enables a previously disabled room.
///
/// Restricted command: bails unless this context's invocation source
/// allows restricted commands (see `bail_restricted`). Replies with a
/// confirmation message on success.
#[admin_command]
pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result {
// Restricted commands may only run from permitted invocation sources
// (e.g. the admin room).
self.bail_restricted()?;
// `false` = clear the room's disabled flag (cf. disable_room, which
// passes `true`).
self.services.rooms.metadata.disable_room(&room_id, false);
self.write_str("Room enabled.").await
}

View File

@@ -16,6 +16,8 @@ pub(super) async fn delete(
mxc: Option<OwnedMxcUri>,
event_id: Option<OwnedEventId>,
) -> Result {
self.bail_restricted()?;
if event_id.is_some() && mxc.is_some() {
return Err!("Please specify either an MXC or an event ID, not both.",);
}
@@ -176,6 +178,8 @@ pub(super) async fn delete(
#[admin_command]
pub(super) async fn delete_list(&self) -> Result {
self.bail_restricted()?;
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
@@ -231,6 +235,8 @@ pub(super) async fn delete_past_remote_media(
after: bool,
yes_i_want_to_delete_local_media: bool,
) -> Result {
self.bail_restricted()?;
if before && after {
return Err!("Please only pick one argument, --before or --after.",);
}
@@ -273,6 +279,8 @@ pub(super) async fn delete_all_from_server(
server_name: OwnedServerName,
yes_i_want_to_delete_local_media: bool,
) -> Result {
self.bail_restricted()?;
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
return Err!("This command only works for remote media by default.",);
}

View File

@@ -59,6 +59,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
reply_id: input.reply_id.as_deref(),
sender: input.sender.as_deref(),
output: BufWriter::new(Vec::new()).into(),
source: input.source,
};
let (result, mut logs) = process(&context, command, &args).await;

View File

@@ -31,7 +31,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result {
.services
.rooms
.timeline
.last_timeline_count(None, &room_id)
.last_timeline_count(&room_id)
.await?;
self.write_str(&format!("{result:#?}")).await
@@ -52,7 +52,7 @@ pub(super) async fn pdus(
.services
.rooms
.timeline
.pdus_rev(None, &room_id, from)
.pdus_rev(&room_id, from)
.try_take(limit.unwrap_or(3))
.try_collect()
.await?;

View File

@@ -24,16 +24,39 @@ pub(super) async fn uptime(&self) -> Result {
/// Prints the server's currently active configuration.
///
/// Restricted command: bails unless this context's invocation source
/// allows restricted commands (see `bail_restricted`).
#[admin_command]
pub(super) async fn show_config(&self) -> Result {
self.bail_restricted()?;

// Relies on the config type's Display impl to render the full
// configuration as text.
self.write_str(&format!("{}", *self.services.server.config))
.await
}
#[admin_command]
pub(super) async fn reload_config(&self, path: Option<PathBuf>) -> Result {
let path = path.as_deref().into_iter();
self.services.config.reload(path)?;
// The path argument is only what's optionally passed via the admin command,
// so we need to merge it with the existing paths if any were given at startup.
let mut paths = Vec::new();
self.write_str("Successfully reconfigured.").await
// Add previously saved paths to the argument list
self.services
.config
.config_paths
.clone()
.unwrap_or_default()
.iter()
.for_each(|p| paths.push(p.to_owned()));
// If a path is given, and it's not already in the list,
// add it last, so that it overrides earlier files
if let Some(p) = path {
if !paths.contains(&p) {
paths.push(p);
}
}
self.services.config.reload(&paths)?;
self.write_str(&format!("Successfully reconfigured from paths: {paths:?}"))
.await
}
#[admin_command]
@@ -97,6 +120,8 @@ pub(super) async fn list_backups(&self) -> Result {
#[admin_command]
pub(super) async fn backup_database(&self) -> Result {
self.bail_restricted()?;
let db = Arc::clone(&self.services.db);
let result = self
.services
@@ -123,6 +148,8 @@ pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result {
#[admin_command]
pub(super) async fn reload_mods(&self) -> Result {
self.bail_restricted()?;
self.services.server.reload()?;
self.write_str("Reloading server...").await
@@ -147,6 +174,8 @@ pub(super) async fn restart(&self, force: bool) -> Result {
#[admin_command]
pub(super) async fn shutdown(&self) -> Result {
self.bail_restricted()?;
warn!("shutdown command");
self.services.server.shutdown()?;

View File

@@ -461,9 +461,11 @@ pub(super) async fn force_join_list_of_local_users(
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Err!("There is not an admin room to check for server admins.",);
};
let server_admins = self.services.admin.get_admins().await;
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
}
let (room_id, servers) = self
.services
@@ -482,15 +484,6 @@ pub(super) async fn force_join_list_of_local_users(
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
.services
.rooms
.state_cache
.active_local_users_in_room(&admin_room)
.map(ToOwned::to_owned)
.collect()
.await;
if !self
.services
.rooms
@@ -583,9 +576,11 @@ pub(super) async fn force_join_all_local_users(
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Err!("There is not an admin room to check for server admins.",);
};
let server_admins = self.services.admin.get_admins().await;
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
}
let (room_id, servers) = self
.services
@@ -604,15 +599,6 @@ pub(super) async fn force_join_all_local_users(
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
.services
.rooms
.state_cache
.active_local_users_in_room(&admin_room)
.map(ToOwned::to_owned)
.collect()
.await;
if !self
.services
.rooms

View File

@@ -59,7 +59,7 @@ pub(crate) async fn get_context_route(
.rooms
.timeline
.get_pdu(event_id)
.map_err(|_| err!(Request(NotFound("Base event not found."))));
.map_err(|_| err!(Request(NotFound("Event not found."))));
let visible = services
.rooms
@@ -70,7 +70,7 @@ pub(crate) async fn get_context_route(
let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;
if base_pdu.room_id_or_hash() != *room_id || base_pdu.event_id != *event_id {
return Err!(Request(NotFound("Base event not found.")));
return Err!(Request(NotFound("Event not found.")));
}
if !visible {
@@ -82,11 +82,25 @@ pub(crate) async fn get_context_route(
let base_event = ignored_filter(&services, (base_count, base_pdu), sender_user);
// PDUs are used to get seen user IDs and then returned in response.
let events_before = services
.rooms
.timeline
.pdus_rev(Some(sender_user), room_id, Some(base_count))
.pdus_rev(room_id, Some(base_count))
.ignore_err()
.then(async |mut pdu| {
pdu.1.set_unsigned(Some(sender_user));
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
})
.ready_filter_map(|item| event_filter(item, filter))
.wide_filter_map(|item| ignored_filter(&services, item, sender_user))
.wide_filter_map(|item| visibility_filter(&services, item, sender_user))
@@ -96,8 +110,20 @@ pub(crate) async fn get_context_route(
let events_after = services
.rooms
.timeline
.pdus(Some(sender_user), room_id, Some(base_count))
.pdus(room_id, Some(base_count))
.ignore_err()
.then(async |mut pdu| {
pdu.1.set_unsigned(Some(sender_user));
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
})
.ready_filter_map(|item| event_filter(item, filter))
.wide_filter_map(|item| ignored_filter(&services, item, sender_user))
.wide_filter_map(|item| visibility_filter(&services, item, sender_user))

View File

@@ -1,7 +1,15 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::{
collections::{BTreeMap, HashMap, HashSet},
time::Duration,
};
use axum::extract::State;
use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils};
use conduwuit::{
Err, Error, Result, debug, debug_warn, err,
result::NotFound,
utils,
utils::{IterStream, stream::WidebandExt},
};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
@@ -134,6 +142,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| u == sender_user,
true, // Always allow local users to see device names of other local users
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
@@ -145,7 +154,12 @@ pub(crate) async fn claim_keys_route(
State(services): State<crate::State>,
body: Ruma<claim_keys::v3::Request>,
) -> Result<claim_keys::v3::Response> {
claim_keys_helper(&services, &body.one_time_keys).await
claim_keys_helper(
&services,
&body.one_time_keys,
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
@@ -421,6 +435,7 @@ pub(crate) async fn get_keys_helper<F>(
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
allowed_signatures: F,
include_display_names: bool,
timeout: Duration,
) -> Result<get_keys::v3::Response>
where
F: Fn(&UserId) -> bool + Send + Sync,
@@ -512,9 +527,10 @@ pub(crate) async fn get_keys_helper<F>(
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
@@ -522,17 +538,22 @@ pub(crate) async fn get_keys_helper<F>(
let request =
federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
let response = tokio::time::timeout(
timeout,
services.sending.send_federation_request(server, request),
)
.await
// Need to flatten the Result<Result<V, E>, E> into Result<V, E>
.map_err(|_| err!(Request(Unknown("Timeout when getting keys over federation."))))
.and_then(|res| res);
let response = services
.sending
.send_federation_request(server, request)
.await;
(server, response)
Some((server, response))
})
.collect();
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
for (server, response) in futures {
match response {
| Ok(response) => {
for (user, master_key) in response.master_keys {
@@ -564,8 +585,8 @@ pub(crate) async fn get_keys_helper<F>(
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
},
| _ => {
failures.insert(server.to_string(), json!({}));
| Err(e) => {
failures.insert(server.to_string(), json!({ "error": e.to_string() }));
},
}
}
@@ -608,6 +629,7 @@ fn add_unsigned_device_display_name(
pub(crate) async fn claim_keys_helper(
services: &Services,
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, OneTimeKeyAlgorithm>>,
timeout: Duration,
) -> Result<claim_keys::v3::Response> {
let mut one_time_keys = BTreeMap::new();
@@ -638,32 +660,39 @@ pub(crate) async fn claim_keys_helper(
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut one_time_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
}
(
server,
services
.sending
.send_federation_request(server, federation::keys::claim_keys::v1::Request {
let response = tokio::time::timeout(
timeout,
services.sending.send_federation_request(
server,
federation::keys::claim_keys::v1::Request {
one_time_keys: one_time_keys_input_fed,
})
.await,
},
),
)
.await
.map_err(|_| err!(Request(Unknown("Timeout when claiming keys over federation."))))
.and_then(|res| res);
Some((server, response))
})
.collect();
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
for (server, response) in futures {
match response {
| Ok(keys) => {
one_time_keys.extend(keys.one_time_keys);
},
| Err(_e) => {
failures.insert(server.to_string(), json!({}));
| Err(e) => {
failures.insert(server.to_string(), json!({"error": e.to_string()}));
},
}
}

View File

@@ -48,7 +48,7 @@
},
};
use super::banned_room_check;
use super::{banned_room_check, validate_remote_member_event_stub};
use crate::Ruma;
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
@@ -837,6 +837,13 @@ async fn join_room_by_id_helper_local(
err!(BadServerResponse("Invalid make_join event json received from server: {e:?}"))
})?;
validate_remote_member_event_stub(
&MembershipState::Join,
sender_user,
room_id,
&join_event_stub,
)?;
let join_authorized_via_users_server = join_event_stub
.get("content")
.map(|s| {

View File

@@ -38,7 +38,7 @@
},
};
use super::{banned_room_check, join::join_room_by_id_helper};
use super::{banned_room_check, join::join_room_by_id_helper, validate_remote_member_event_stub};
use crate::Ruma;
/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}`
@@ -408,6 +408,13 @@ async fn knock_room_helper_local(
err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}"))
})?;
validate_remote_member_event_stub(
&MembershipState::Knock,
sender_user,
room_id,
&knock_event_stub,
)?;
knock_event_stub.insert(
"origin".to_owned(),
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),

View File

@@ -21,6 +21,7 @@
};
use service::Services;
use super::validate_remote_member_event_stub;
use crate::Ruma;
/// # `POST /_matrix/client/v3/rooms/{roomId}/leave`
@@ -324,6 +325,13 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
)))
})?;
validate_remote_member_event_stub(
&MembershipState::Leave,
user_id,
room_id,
&leave_event_stub,
)?;
// TODO: Is origin needed?
leave_event_stub.insert(
"origin".to_owned(),

View File

@@ -13,7 +13,14 @@
use axum::extract::State;
use conduwuit::{Err, Result, warn};
use futures::{FutureExt, StreamExt};
use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms};
use ruma::{
CanonicalJsonObject, OwnedRoomId, RoomId, ServerName, UserId,
api::client::membership::joined_rooms,
events::{
StaticEventContent,
room::member::{MembershipState, RoomMemberEventContent},
},
};
use service::Services;
pub(crate) use self::{
@@ -153,3 +160,80 @@ pub(crate) async fn banned_room_check(
Ok(())
}
/// Validates that an event returned from a remote server by `/make_*`
/// actually is a membership event with the expected fields.
///
/// Without checking this, the remote server could use the remote membership
/// mechanism to trick our server into signing arbitrary malicious events.
///
/// Checks performed on the stub, in order:
/// - `type` is present and equals `m.room.member`
/// - `sender` is present and equals `user_id`
/// - `state_key` is present and equals `user_id` (membership events are
///   state-keyed by the affected user)
/// - `room_id` is present and equals `room_id`
/// - `content.membership` is present and equals the expected `membership`
///
/// # Errors
///
/// Returns a `BadServerResponse` error describing the first field that is
/// missing or does not match.
pub(crate) fn validate_remote_member_event_stub(
	membership: &MembershipState,
	user_id: &UserId,
	room_id: &RoomId,
	event_stub: &CanonicalJsonObject,
) -> Result<()> {
	let Some(event_type) = event_stub.get("type") else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing type field"
		));
	};
	if event_type != &RoomMemberEventContent::TYPE {
		return Err!(BadServerResponse(
			"Remote server returned member event with invalid event type"
		));
	}
	let Some(sender) = event_stub.get("sender") else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing sender field"
		));
	};
	if sender != &user_id.as_str() {
		return Err!(BadServerResponse(
			"Remote server returned member event with incorrect sender"
		));
	}
	let Some(state_key) = event_stub.get("state_key") else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing state_key field"
		));
	};
	if state_key != &user_id.as_str() {
		return Err!(BadServerResponse(
			"Remote server returned member event with incorrect state_key"
		));
	}
	let Some(event_room_id) = event_stub.get("room_id") else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing room_id field"
		));
	};
	if event_room_id != &room_id.as_str() {
		return Err!(BadServerResponse(
			"Remote server returned member event with incorrect room_id"
		));
	}
	let Some(content) = event_stub
		.get("content")
		.and_then(|content| content.as_object())
	else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing content field"
		));
	};
	let Some(event_membership) = content.get("membership") else {
		return Err!(BadServerResponse(
			"Remote server returned member event with missing membership field"
		));
	};
	// BUGFIX: this previously reported "incorrect room_id" — it validates the
	// membership state, so report that instead.
	if event_membership != &membership.as_str() {
		return Err!(BadServerResponse(
			"Remote server returned member event with incorrect membership"
		));
	}
	Ok(())
}

View File

@@ -1,7 +1,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Result, at,
Err, Result, at, debug_warn,
matrix::{
event::{Event, Matches},
pdu::PduCount,
@@ -122,14 +122,14 @@ pub(crate) async fn get_message_events_route(
| Direction::Forward => services
.rooms
.timeline
.pdus(Some(sender_user), room_id, Some(from))
.pdus(room_id, Some(from))
.ignore_err()
.boxed(),
| Direction::Backward => services
.rooms
.timeline
.pdus_rev(Some(sender_user), room_id, Some(from))
.pdus_rev(room_id, Some(from))
.ignore_err()
.boxed(),
};
@@ -140,6 +140,18 @@ pub(crate) async fn get_message_events_route(
.wide_filter_map(|item| ignored_filter(&services, item, sender_user))
.wide_filter_map(|item| visibility_filter(&services, item, sender_user))
.take(limit)
.then(async |mut pdu| {
pdu.1.set_unsigned(Some(sender_user));
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
})
.collect()
.await;

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{
Result, at,
Err, Result, at, debug_warn,
matrix::{Event, event::RelationTypeEqual, pdu::PduCount},
utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
};
@@ -109,6 +109,16 @@ async fn paginate_relations_with_filter(
recurse: bool,
dir: Direction,
) -> Result<get_relating_events::v1::Response> {
if !services
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, target)
.await
{
debug_warn!(req_evt = ?target, ?room_id, "Event relations requested by {sender_user} but is not allowed to see it, returning 404");
return Err!(Request(NotFound("Event not found.")));
}
let start: PduCount = from
.map(str::parse)
.transpose()?
@@ -129,11 +139,6 @@ async fn paginate_relations_with_filter(
// Spec (v1.10) recommends depth of at least 3
let depth: u8 = if recurse { 3 } else { 1 };
// Check if this is a thread request
let is_thread = filter_rel_type
.as_ref()
.is_some_and(|rel| *rel == RelationType::Thread);
let events: Vec<_> = services
.rooms
.pdu_metadata
@@ -152,40 +157,24 @@ async fn paginate_relations_with_filter(
})
.stream()
.ready_take_while(|(count, _)| Some(*count) != to)
.wide_filter_map(|item| visibility_filter(services, sender_user, item))
.take(limit)
.wide_filter_map(|item| visibility_filter(services, sender_user, item))
.then(async |mut pdu| {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations to relation: {e}");
}
pdu
})
.collect()
.await;
// For threads, check if we should include the root event
let mut root_event = None;
if is_thread && dir == Direction::Backward {
// Check if we've reached the beginning of the thread
// (fewer events than requested means we've exhausted the thread)
if events.len() < limit {
// Try to get the thread root event
if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
// Check visibility
if services
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, target)
.await
{
// Store the root event to add to the response
root_event = Some(root_pdu);
}
}
}
}
// Determine if there are more events to fetch
let has_more = if root_event.is_some() {
false // We've included the root, no more events
} else {
// Check if we got a full page of results (might be more)
events.len() >= limit
};
let has_more = events.len() >= limit;
let next_batch = if has_more {
match dir {
@@ -197,11 +186,10 @@ async fn paginate_relations_with_filter(
None
};
// Build the response chunk with thread root if needed
let chunk: Vec<_> = root_event
let chunk: Vec<_> = events
.into_iter()
.map(at!(1))
.map(Event::into_format)
.chain(events.into_iter().map(at!(1)).map(Event::into_format))
.collect();
Ok(get_relating_events::v1::Response {

View File

@@ -1,5 +1,5 @@
use axum::extract::State;
use conduwuit::{Err, Event, Result, err};
use conduwuit::{Err, Event, Result, debug_warn, err};
use futures::{FutureExt, TryFutureExt, future::try_join};
use ruma::api::client::room::get_room_event;
@@ -33,7 +33,16 @@ pub(crate) async fn get_room_event_route(
return Err!(Request(Forbidden("You don't have permission to view this event.")));
}
event.add_age().ok();
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(body.sender_user(), &mut event)
.await
{
debug_warn!("Failed to add bundled aggregations to event: {e}");
}
event.set_unsigned(body.sender_user.as_deref());
Ok(get_room_event::v3::Response { event: event.into_format() })
}

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{
Err, Event, Result, at,
Err, Event, Result, at, debug_warn,
utils::{BoolExt, stream::TryTools},
};
use futures::{FutureExt, TryStreamExt, future::try_join4};
@@ -40,12 +40,28 @@ pub(crate) async fn room_initial_sync_route(
.map_ok(Event::into_format)
.try_collect::<Vec<_>>();
// Events are returned in body
let limit = LIMIT_MAX;
let events = services
.rooms
.timeline
.pdus_rev(None, room_id, None)
.pdus_rev(room_id, None)
.try_take(limit)
.and_then(async |mut pdu| {
pdu.1.set_unsigned(body.sender_user.as_deref());
if let Some(sender_user) = body.sender_user.as_deref() {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
}
Ok(pdu)
})
.try_collect::<Vec<_>>();
let (membership, visibility, state, events) =

View File

@@ -1,11 +1,11 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Result, debug_warn, trace,
Err, Result, debug, debug_warn, info, trace,
utils::{IterStream, future::TryExtExt},
};
use futures::{
FutureExt, StreamExt,
FutureExt, StreamExt, TryFutureExt,
future::{OptionFuture, join3},
stream::FuturesUnordered,
};
@@ -79,9 +79,15 @@ async fn room_summary_response(
.server_in_room(services.globals.server_name(), room_id)
.await
{
return local_room_summary_response(services, room_id, sender_user)
match local_room_summary_response(services, room_id, sender_user)
.boxed()
.await;
.await
{
| Ok(response) => return Ok(response),
| Err(e) => {
debug_warn!("Failed to get local room summary: {e:?}, falling back to remote");
},
}
}
let room =
@@ -111,26 +117,27 @@ async fn local_room_summary_response(
sender_user: Option<&UserId>,
) -> Result<get_summary::msc3266::Response> {
trace!(?sender_user, "Sending local room summary response for {room_id:?}");
let join_rule = services.rooms.state_accessor.get_join_rules(room_id);
let world_readable = services.rooms.state_accessor.is_world_readable(room_id);
let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id);
let (join_rule, world_readable, guest_can_join) =
join3(join_rule, world_readable, guest_can_join).await;
trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
user_can_see_summary(
services,
room_id,
&join_rule.clone().into(),
guest_can_join,
world_readable,
join_rule.allowed_rooms(),
sender_user,
let (join_rule, world_readable, guest_can_join) = join3(
services.rooms.state_accessor.get_join_rules(room_id),
services.rooms.state_accessor.is_world_readable(room_id),
services.rooms.state_accessor.guest_can_join(room_id),
)
.await?;
.await;
// Synapse allows server admins to bypass visibility checks.
// That seems neat so we'll copy that behaviour.
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
user_can_see_summary(
services,
room_id,
&join_rule.clone().into(),
guest_can_join,
world_readable,
join_rule.allowed_rooms(),
sender_user,
)
.await?;
}
let canonical_alias = services
.rooms
@@ -231,15 +238,27 @@ async fn remote_room_summary_hierarchy_response(
"Federaton of room {room_id} is currently disabled on this server."
)));
}
if servers.is_empty() {
return Err!(Request(MissingParam(
"No servers were provided to fetch the room over federation"
)));
}
let request = get_hierarchy::v1::Request::new(room_id.to_owned());
let mut requests: FuturesUnordered<_> = servers
.iter()
.map(|server| {
info!("Fetching room summary for {room_id} from server {server}");
services
.sending
.send_federation_request(server, request.clone())
.inspect_ok(move |v| {
debug!("Fetched room summary for {room_id} from server {server}: {v:?}");
})
.inspect_err(move |e| {
info!("Failed to fetch room summary for {room_id} from server {server}: {e}");
})
})
.collect();
@@ -255,23 +274,23 @@ async fn remote_room_summary_hierarchy_response(
continue;
}
return user_can_see_summary(
services,
room_id,
&room.join_rule,
room.guest_can_join,
room.world_readable,
room.allowed_room_ids.iter().map(AsRef::as_ref),
sender_user,
)
.await
.map(|()| room);
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
return user_can_see_summary(
services,
room_id,
&room.join_rule,
room.guest_can_join,
room.world_readable,
room.allowed_room_ids.iter().map(AsRef::as_ref),
sender_user,
)
.await
.map(|()| room);
}
return Ok(room);
}
Err!(Request(NotFound(
"Room is unknown to this server and was unable to fetch over federation with the \
provided servers available"
)))
Err!(Request(NotFound("Room not found or is not accessible")))
}
async fn user_can_see_summary<'a, I>(
@@ -311,21 +330,14 @@ async fn user_can_see_summary<'a, I>(
return Ok(());
}
Err!(Request(Forbidden(
"Room is not world readable, not publicly accessible/joinable, restricted room \
conditions not met, and guest access is forbidden. Not allowed to see details \
of this room."
)))
Err!(Request(Forbidden("Room is not accessible")))
},
| None => {
if is_public_room || world_readable {
return Ok(());
}
Err!(Request(Forbidden(
"Room is not world readable or publicly accessible/joinable, authentication is \
required"
)))
Err!(Request(Forbidden("Room is not accessible")))
},
}
}

View File

@@ -68,6 +68,12 @@ pub(crate) async fn upgrade_room_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
// Make sure this isn't the admin room
// Admin room upgrades are hacky and should be done manually instead.
if services.admin.is_admin_room(&body.room_id).await {
return Err!(Request(Forbidden("Upgrading the admin room this way is not allowed.")));
}
// First, check if the user has permission to upgrade the room (send tombstone
// event)
let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
@@ -266,7 +272,7 @@ pub(crate) async fn upgrade_room_route(
.room_state_keys(&body.room_id, event_type)
.await?;
for state_key in state_keys {
let event_content = match services
let mut event_content = match services
.rooms
.state_accessor
.room_state_get(&body.room_id, event_type, &state_key)
@@ -279,6 +285,21 @@ pub(crate) async fn upgrade_room_route(
// If the event content is empty, we skip it
continue;
}
// If this is a power levels event, and the new room version has creators,
// we need to make sure they don't appear in the users block of power levels.
if *event_type == StateEventType::RoomPowerLevels {
// TODO(v12): additional creators
let creators = vec![sender_user];
let mut power_levels_event_content: RoomPowerLevelsEventContent =
serde_json::from_str(event_content.get()).map_err(|_| {
err!(Request(BadJson("Power levels event content is not valid")))
})?;
for creator in creators {
power_levels_event_content.users.remove(creator);
}
event_content = to_raw_value(&power_levels_event_content)
.expect("event is valid, we just deserialized and modified it");
}
services
.rooms

View File

@@ -2,7 +2,7 @@
use axum::extract::State;
use conduwuit::{
Err, Result, at, is_true,
Err, Result, at, debug_warn, is_true,
matrix::Event,
result::FlatOk,
utils::{IterStream, stream::ReadyExt},
@@ -50,7 +50,7 @@ pub(crate) async fn search_events_route(
Ok(Response {
search_categories: ResultCategories {
room_events: room_events_result
room_events: Box::pin(room_events_result)
.await
.unwrap_or_else(|| Ok(ResultRoomEvents::default()))?,
},
@@ -110,7 +110,12 @@ async fn category_room_events(
limit,
};
let (count, results) = services.rooms.search.search_pdus(&query).await.ok()?;
let (count, results) = services
.rooms
.search
.search_pdus(&query, sender_user)
.await
.ok()?;
results
.collect::<Vec<_>>()
@@ -144,6 +149,17 @@ async fn category_room_events(
.map(at!(2))
.flatten()
.stream()
.then(|mut pdu| async {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu)
.await
{
debug_warn!("Failed to add bundled aggregations to search result: {e}");
}
pdu
})
.map(Event::into_format)
.map(|result| SearchResult {
rank: None,

View File

@@ -158,7 +158,7 @@ pub(crate) async fn get_state_events_for_key_route(
"content": event.content(),
"event_id": event.event_id(),
"origin_server_ts": event.origin_server_ts(),
"room_id": event.room_id(),
"room_id": event.room_id_or_hash(),
"sender": event.sender(),
"state_key": event.state_key(),
"type": event.kind(),

View File

@@ -4,7 +4,7 @@
use std::collections::VecDeque;
use conduwuit::{
Event, PduCount, Result, err,
Event, PduCount, Result, debug_warn, err,
matrix::pdu::PduEvent,
ref_at, trace,
utils::stream::{BroadbandExt, ReadyExt, TryIgnore},
@@ -53,7 +53,7 @@ async fn load_timeline(
let last_timeline_count = services
.rooms
.timeline
.last_timeline_count(Some(sender_user), room_id)
.last_timeline_count(room_id)
.await
.map_err(|err| {
err!(Database(warn!("Failed to fetch end of room timeline: {}", err)))
@@ -71,13 +71,24 @@ async fn load_timeline(
services
.rooms
.timeline
.pdus_rev(
Some(sender_user),
room_id,
ending_count.map(|count| count.saturating_add(1)),
)
.pdus_rev(room_id, ending_count.map(|count| count.saturating_add(1)))
.ignore_err()
.ready_take_while(move |&(pducount, _)| pducount > starting_count)
.map(move |mut pdu| {
pdu.1.set_unsigned(Some(sender_user));
pdu
})
.then(async move |mut pdu| {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
})
.boxed()
},
| None => {
@@ -86,12 +97,23 @@ async fn load_timeline(
services
.rooms
.timeline
.pdus_rev(
Some(sender_user),
room_id,
ending_count.map(|count| count.saturating_add(1)),
)
.pdus_rev(room_id, ending_count.map(|count| count.saturating_add(1)))
.ignore_err()
.map(move |mut pdu| {
pdu.1.set_unsigned(Some(sender_user));
pdu
})
.then(async move |mut pdu| {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
})
.boxed()
},
};

View File

@@ -127,7 +127,7 @@ pub(super) async fn build_state_incremental<'a>(
let last_pdu_of_last_sync = services
.rooms
.timeline
.pdus_rev(Some(sender_user), room_id, Some(last_sync_end_count.saturating_add(1)))
.pdus_rev(room_id, Some(last_sync_end_count.saturating_add(1)))
.boxed()
.next()
.await

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{
Result, at,
Result, at, debug_warn,
matrix::{
Event,
pdu::{PduCount, PduEvent},
@@ -45,6 +45,17 @@ pub(crate) async fn get_threads_route(
.await
.then_some((count, pdu))
})
.then(|(count, mut pdu)| async move {
if let Err(e) = services
.rooms
.pdu_metadata
.add_bundled_aggregations_to_pdu(body.sender_user(), &mut pdu)
.await
{
debug_warn!("Failed to add bundled aggregations to thread: {e}");
}
(count, pdu)
})
.collect()
.await;

View File

@@ -1,6 +1,5 @@
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{Error, Result};
use futures::StreamExt;
use ruma::api::client::{
discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
@@ -71,21 +70,18 @@ pub(crate) async fn well_known_support(
// Try to add admin users as contacts if no contacts are configured
if contacts.is_empty() {
if let Ok(admin_room) = services.admin.get_admin_room().await {
let admin_users = services.rooms.state_cache.room_members(&admin_room);
let mut stream = admin_users;
let admin_users = services.admin.get_admins().await;
while let Some(user_id) = stream.next().await {
// Skip server user
if *user_id == services.globals.server_user {
continue;
}
contacts.push(Contact {
role: role_value.clone(),
email_address: None,
matrix_id: Some(user_id.to_owned()),
});
for user_id in &admin_users {
if *user_id == services.globals.server_user {
continue;
}
contacts.push(Contact {
role: role_value.clone(),
email_address: None,
matrix_id: Some(user_id.to_owned()),
});
}
}

View File

@@ -3,6 +3,7 @@
use axum::extract::State;
use conduwuit::{
Event, PduCount, Result,
result::LogErr,
utils::{IterStream, ReadyExt, stream::TryTools},
};
use futures::{FutureExt, StreamExt, TryStreamExt};
@@ -62,7 +63,7 @@ pub(crate) async fn get_backfill_route(
pdus: services
.rooms
.timeline
.pdus_rev(None, &body.room_id, Some(from.saturating_add(1)))
.pdus_rev(&body.room_id, Some(from.saturating_add(1)))
.try_take(limit)
.try_filter_map(|(_, pdu)| async move {
Ok(services
@@ -72,6 +73,15 @@ pub(crate) async fn get_backfill_route(
.await
.then_some(pdu))
})
.and_then(async |mut pdu| {
// Strip the transaction ID, as that is private
pdu.remove_transaction_id().log_err().ok();
// Add age, as this is specified
pdu.add_age().log_err().ok();
// It's not clear if we should strip or add any more data, leave as is.
// In particular: Redaction?
Ok(pdu)
})
.try_filter_map(|pdu| async move {
Ok(services
.rooms

View File

@@ -10,6 +10,7 @@
use ruma::{
CanonicalJsonValue, OwnedUserId, UserId,
api::{client::error::ErrorKind, federation::membership::create_invite},
events::room::member::{MembershipState, RoomMemberEventContent},
serde::JsonObject,
};
@@ -60,6 +61,46 @@ pub(crate) async fn create_invite_route(
let mut signed_event = utils::to_canonical_object(&body.event)
.map_err(|_| err!(Request(InvalidParam("Invite event is invalid."))))?;
// Ensure this is a membership event
if signed_event
.get("type")
.expect("event must have a type")
.as_str()
.expect("type must be a string")
!= "m.room.member"
{
return Err!(Request(BadJson(
"Not allowed to send non-membership event to invite endpoint."
)));
}
let content: RoomMemberEventContent = serde_json::from_value(
signed_event
.get("content")
.ok_or_else(|| err!(Request(BadJson("Event missing content property"))))?
.clone()
.into(),
)
.map_err(|e| err!(Request(BadJson(warn!("Event content is empty or invalid: {e}")))))?;
// Ensure this is an invite membership event
if content.membership != MembershipState::Invite {
return Err!(Request(BadJson(
"Not allowed to send a non-invite membership event to invite endpoint."
)));
}
// Ensure the sending user isn't a lying bozo
let sender_server = signed_event
.get("sender")
.try_into()
.map(UserId::server_name)
.map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?;
if sender_server != body.origin() {
return Err!(Request(Forbidden("Sender's server does not match the origin server.",)));
}
// Ensure the target user belongs to this server
let recipient_user: OwnedUserId = signed_event
.get("state_key")
.try_into()

View File

@@ -1,3 +1,5 @@
use std::time::Duration;
use axum::extract::State;
use conduwuit::{Error, Result};
use futures::{FutureExt, StreamExt, TryFutureExt};
@@ -96,6 +98,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| Some(u.server_name()) == body.origin.as_deref(),
services.globals.allow_device_name_federation(),
Duration::from_secs(0),
)
.await?;
@@ -124,7 +127,8 @@ pub(crate) async fn claim_keys_route(
));
}
let result = claim_keys_helper(&services, &body.one_time_keys).await?;
let result =
claim_keys_helper(&services, &body.one_time_keys, Duration::from_secs(0)).await?;
Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys })
}

View File

@@ -6,7 +6,7 @@
use std::{
collections::{BTreeMap, BTreeSet},
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
path::{Path, PathBuf},
path::PathBuf,
};
use conduwuit_macros::config_example_generator;
@@ -53,9 +53,13 @@
### For more information, see:
### https://continuwuity.org/configuration.html
"#,
ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure"
ignore = "config_paths catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure"
)]
pub struct Config {
// Paths to config file(s). Not supposed to be set manually in the config file,
// only updated dynamically from the --config option given at runtime.
pub config_paths: Option<Vec<PathBuf>>,
/// The server_name is the pretty name of this server. It is used as a
/// suffix for user and room IDs/aliases.
///
@@ -65,8 +69,8 @@ pub struct Config {
/// Also see the `[global.well_known]` config section at the very bottom.
///
/// Examples of delegation:
/// - https://puppygock.gay/.well-known/matrix/server
/// - https://puppygock.gay/.well-known/matrix/client
/// - https://continuwuity.org/.well-known/matrix/server
/// - https://continuwuity.org/.well-known/matrix/client
///
/// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
/// WIPE.
@@ -496,7 +500,19 @@ pub struct Config {
#[serde(default = "default_appservice_idle_timeout")]
pub appservice_idle_timeout: u64,
/// Notification gateway pusher idle connection pool timeout.
/// Notification gateway pusher request connection timeout (seconds).
///
/// default: 15
#[serde(default = "default_pusher_conn_timeout")]
pub pusher_conn_timeout: u64,
/// Notification gateway pusher total request timeout (seconds).
///
/// default: 60
#[serde(default = "default_pusher_timeout")]
pub pusher_timeout: u64,
/// Notification gateway pusher idle connection pool timeout (seconds).
///
/// default: 15
#[serde(default = "default_pusher_idle_timeout")]
@@ -703,10 +719,13 @@ pub struct Config {
pub allow_unstable_room_versions: bool,
/// Default room version continuwuity will create rooms with.
/// Note that this has to be a string since the room version is a string
/// rather than an integer. Forgetting the quotes will make the server fail
/// to start!
///
/// Per spec, room version 11 is the default.
/// Per spec, room version "11" is the default.
///
/// default: 11
/// default: "11"
#[serde(default = "default_default_room_version")]
pub default_room_version: RoomVersionId,
@@ -730,6 +749,13 @@ pub struct Config {
#[serde(default = "default_otlp_filter", alias = "jaeger_filter")]
pub otlp_filter: String,
/// Protocol to use for OTLP tracing export. Options are "http" or "grpc".
/// The HTTP protocol uses port 4318 by default, while gRPC uses port 4317.
///
/// default: "http"
#[serde(default = "default_otlp_protocol")]
pub otlp_protocol: String,
/// If the 'perf_measurements' compile-time feature is enabled, enables
/// collecting folded stack trace profile of tracing spans using
/// tracing_flame. The resulting profile can be visualized with inferno[1],
@@ -1224,6 +1250,12 @@ pub struct Config {
#[serde(default)]
pub rocksdb_repair: bool,
#[serde(default)]
pub rocksdb_read_only: bool,
#[serde(default)]
pub rocksdb_secondary: bool,
/// Enables idle CPU priority for compaction thread. This is not enabled by
/// default to prevent compaction from falling too far behind on busy
/// systems.
@@ -1650,6 +1682,13 @@ pub struct Config {
#[serde(default = "default_url_preview_max_spider_size")]
pub url_preview_max_spider_size: usize,
/// Total request timeout for URL previews (seconds). This includes
/// connection, request, and response body reading time.
///
/// default: 120
#[serde(default = "default_url_preview_timeout")]
pub url_preview_timeout: u64,
/// Option to decide whether you would like to run the domain allowlist
/// checks (contains and explicit) on the root domain or not. Does not apply
/// to URL contains allowlist. Defaults to false.
@@ -1739,7 +1778,7 @@ pub struct Config {
/// a normal continuwuity admin command. The reply will be publicly visible
/// to the room, originating from the sender.
///
/// example: \\!admin debug ping puppygock.gay
/// example: \\!admin debug ping continuwuity.org
#[serde(default = "true_fn")]
pub admin_escape_commands: bool,
@@ -1757,7 +1796,8 @@ pub struct Config {
/// For example: `./continuwuity --execute "server admin-notice continuwuity
/// has started up at $(date)"`
///
/// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]`
/// example: admin_execute = ["debug ping continuwuity.org", "debug echo
/// hi"]`
///
/// default: []
#[serde(default)]
@@ -1798,6 +1838,22 @@ pub struct Config {
#[serde(default = "default_admin_room_tag")]
pub admin_room_tag: String,
/// A list of Matrix IDs that are qualified as server admins.
///
/// Any Matrix IDs within this list are regarded as an admin
/// regardless of whether they are in the admin room or not
///
/// default: []
#[serde(default)]
pub admins_list: Vec<OwnedUserId>,
/// Defines whether those within the admin room are added to the
/// admins_list.
///
/// default: true
#[serde(default = "true_fn")]
pub admins_from_room: bool,
/// Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
/// This is NOT enabled by default.
#[serde(default)]
@@ -2217,26 +2273,24 @@ struct ListeningAddr {
impl Config {
/// Pre-initialize config
pub fn load<'a, I>(paths: I) -> Result<Figment>
where
I: Iterator<Item = &'a Path>,
{
pub fn load(paths: &[PathBuf]) -> Result<Figment> {
let envs = [
Env::var("CONDUIT_CONFIG"),
Env::var("CONDUWUIT_CONFIG"),
Env::var("CONTINUWUITY_CONFIG"),
];
let config = envs
let mut config = envs
.into_iter()
.flatten()
.map(Toml::file)
.chain(paths.map(Toml::file))
.chain(paths.iter().cloned().map(Toml::file))
.fold(Figment::new(), |config, file| config.merge(file.nested()))
.merge(Env::prefixed("CONDUIT_").global().split("__"))
.merge(Env::prefixed("CONDUWUIT_").global().split("__"))
.merge(Env::prefixed("CONTINUWUITY_").global().split("__"));
config = config.join(("config_paths", paths));
Ok(config)
}
@@ -2389,6 +2443,10 @@ fn default_appservice_timeout() -> u64 { 35 }
fn default_appservice_idle_timeout() -> u64 { 300 }
fn default_pusher_conn_timeout() -> u64 { 15 }
fn default_pusher_timeout() -> u64 { 60 }
fn default_pusher_idle_timeout() -> u64 { 15 }
fn default_max_fetch_prev_events() -> u16 { 192_u16 }
@@ -2407,6 +2465,8 @@ fn default_otlp_filter() -> String {
.to_owned()
}
fn default_otlp_protocol() -> String { "http".to_owned() }
fn default_tracing_flame_output_path() -> String { "./tracing.folded".to_owned() }
fn default_trusted_servers() -> Vec<OwnedServerName> {
@@ -2514,6 +2574,8 @@ fn default_url_preview_max_spider_size() -> usize {
256_000 // 256KB
}
fn default_url_preview_timeout() -> u64 { 120 }
fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() }
fn default_sentry_endpoint() -> Option<Url> { None }

View File

@@ -14,11 +14,12 @@
RoomVersionId::V9,
RoomVersionId::V10,
RoomVersionId::V11,
RoomVersionId::V12,
];
/// Experimental, partially supported room versions
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5, RoomVersionId::V12];
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
type RoomVersion = (RoomVersionId, RoomVersionStability);

View File

@@ -56,7 +56,7 @@ fn from(event: Ref<'a, E>) -> Self {
"content": content,
"event_id": event.event_id(),
"origin_server_ts": event.origin_server_ts(),
"room_id": event.room_id(),
"room_id": event.room_id_or_hash(),
"sender": event.sender(),
"type": event.kind(),
});
@@ -117,7 +117,7 @@ fn from(event: Ref<'a, E>) -> Self {
"content": event.content(),
"event_id": event.event_id(),
"origin_server_ts": event.origin_server_ts(),
"room_id": event.room_id(),
"room_id": event.room_id_or_hash(),
"sender": event.sender(),
"state_key": event.state_key(),
"type": event.kind(),

View File

@@ -1,10 +1,23 @@
use std::collections::BTreeMap;
use std::{borrow::Borrow, collections::BTreeMap};
use ruma::MilliSecondsSinceUnixEpoch;
use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value};
use super::Pdu;
use crate::{Result, err, implement};
use crate::{Result, err, implement, result::LogErr};
/// Set the `unsigned` field of the PDU using only information in the PDU.
/// Some unsigned data is already set within the database (eg. prev events,
/// threads). Once this is done, other data must be calculated from the database
/// (eg. relations) This is for server-to-client events.
/// Backfill handles this itself.
#[implement(Pdu)]
pub fn set_unsigned(&mut self, user_id: Option<&ruma::UserId>) {
if Some(self.sender.borrow()) != user_id {
self.remove_transaction_id().log_err().ok();
}
self.add_age().log_err().ok();
}
#[implement(Pdu)]
pub fn remove_transaction_id(&mut self) -> Result {

View File

@@ -558,12 +558,19 @@ pub async fn auth_check<E, F, Fut>(
// If type is m.room.power_levels
if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels {
debug!("starting m.room.power_levels check");
let mut creators = BTreeSet::new();
if room_version.explicitly_privilege_room_creators {
creators.insert(create_event.sender().to_owned());
for creator in room_create_content.additional_creators.iter().flatten() {
creators.insert(creator.deserialize()?);
}
}
match check_power_levels(
room_version,
incoming_event,
power_levels_event.as_ref(),
sender_power_level,
&creators,
) {
| Some(required_pwr_lvl) =>
if !required_pwr_lvl {
@@ -1221,8 +1228,8 @@ fn check_power_levels(
power_event: &impl Event,
previous_power_event: Option<&impl Event>,
user_level: Int,
creators: &BTreeSet<OwnedUserId>,
) -> Option<bool> {
// TODO(hydra): This function does not care about creators!
match power_event.state_key() {
| Some("") => {},
| Some(key) => {
@@ -1287,6 +1294,10 @@ fn check_power_levels(
for user in user_levels_to_check {
let old_level = old_state.users.get(user);
let new_level = new_state.users.get(user);
if new_level.is_some() && creators.contains(user) {
warn!("creators cannot appear in the users list of m.room.power_levels");
return Some(false); // cannot alter creator power level
}
if old_level.is_some() && new_level.is_some() && old_level == new_level {
continue;
}

View File

@@ -19,7 +19,7 @@
use conduwuit::{Err, Result, debug, info, warn};
use rocksdb::{
AsColumnFamilyRef, BoundColumnFamily, DBCommon, MultiThreaded, OptimisticTransactionDB,
AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded,
WaitForCompactOptions,
};
@@ -33,11 +33,13 @@ pub struct Engine {
pub(crate) db: Db,
pub(crate) pool: Arc<Pool>,
pub(crate) ctx: Arc<Context>,
pub(super) read_only: bool,
pub(super) secondary: bool,
pub(crate) checksums: bool,
corks: AtomicU32,
}
pub(crate) type Db = OptimisticTransactionDB<MultiThreaded>;
pub(crate) type Db = DBWithThreadMode<MultiThreaded>;
impl Engine {
#[tracing::instrument(
@@ -127,6 +129,14 @@ pub fn current_sequence(&self) -> u64 {
sequence
}
#[inline]
#[must_use]
pub fn is_read_only(&self) -> bool { self.secondary || self.read_only }
#[inline]
#[must_use]
pub fn is_secondary(&self) -> bool { self.secondary }
}
impl Drop for Engine {

View File

@@ -12,8 +12,9 @@ pub fn backup(&self) -> Result {
let mut engine = self.backup_engine()?;
let config = &self.ctx.server.config;
if config.database_backups_to_keep > 0 {
let flush = !self.is_read_only();
engine
.create_new_backup_flush(&self.db, true)
.create_new_backup_flush(&self.db, flush)
.map_err(map_err)?;
let engine_info = engine.get_backup_info();

View File

@@ -1,7 +1,7 @@
use std::fmt::Write;
use conduwuit::{Result, implement};
use rocksdb::perf::MemoryUsageBuilder;
use rocksdb::perf::get_memory_usage_stats;
use super::Engine;
use crate::or_else;
@@ -9,21 +9,16 @@
#[implement(Engine)]
pub fn memory_usage(&self) -> Result<String> {
let mut res = String::new();
let mut builder = MemoryUsageBuilder::new().or_else(or_else)?;
builder.add_db(&self.db);
builder.add_cache(&self.ctx.row_cache.lock());
let usage = builder.build().or_else(or_else)?;
let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()]))
.or_else(or_else)?;
let mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0;
writeln!(
res,
"Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow \
cache: {:.2} MiB",
mibs(usage.approximate_mem_table_total()),
mibs(usage.approximate_mem_table_unflushed()),
mibs(usage.approximate_mem_table_readers_total()),
mibs(stats.mem_table_total),
mibs(stats.mem_table_unflushed),
mibs(stats.mem_table_readers_total),
mibs(u64::try_from(self.ctx.row_cache.lock().get_usage())?),
)?;

View File

@@ -35,7 +35,14 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
}
debug!("Opening database...");
let db = Db::open_cf_descriptors(&db_opts, path, cfds).or_else(or_else)?;
let db = if config.rocksdb_read_only {
Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false)
} else if config.rocksdb_secondary {
Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds)
} else {
Db::open_cf_descriptors(&db_opts, path, cfds)
}
.or_else(or_else)?;
info!(
columns = num_cfds,
@@ -48,6 +55,8 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
db,
pool: ctx.pool.clone(),
ctx: ctx.clone(),
read_only: config.rocksdb_read_only,
secondary: config.rocksdb_secondary,
checksums: config.rocksdb_checksums,
corks: AtomicU32::new(0),
}))

View File

@@ -219,7 +219,7 @@ pub fn insert_batch<'a, I, K, V>(&'a self, iter: I)
K: AsRef<[u8]> + Sized + Debug + 'a,
V: AsRef<[u8]> + Sized + 'a,
{
let mut batch = WriteBatchWithTransaction::<true>::default();
let mut batch = WriteBatchWithTransaction::<false>::default();
for (key, val) in iter {
batch.put_cf(&self.cf(), key.as_ref(), val.as_ref());
}

View File

@@ -77,6 +77,14 @@ pub fn iter(&self) -> impl Iterator<Item = (&MapsKey, &MapsVal)> + Send + '_ {
#[inline]
pub fn keys(&self) -> impl Iterator<Item = &MapsKey> + Send + '_ { self.maps.keys() }
#[inline]
#[must_use]
pub fn is_read_only(&self) -> bool { self.db.is_read_only() }
#[inline]
#[must_use]
pub fn is_secondary(&self) -> bool { self.db.is_secondary() }
}
impl Index<&str> for Database {

View File

@@ -63,15 +63,16 @@ standard = [
"systemd",
"url_preview",
"zstd_compression",
"sentry_telemetry"
"sentry_telemetry",
"otlp_telemetry",
"console",
]
full = [
"standard",
# "hardened_malloc", # Conflicts with jemalloc
"jemalloc_prof",
"perf_measurements",
"tokio_console"
# sentry_telemetry
"tokio_console",
]
blurhashing = [
@@ -124,12 +125,15 @@ ldap = [
media_thumbnail = [
"conduwuit-service/media_thumbnail",
]
perf_measurements = [
otlp_telemetry = [
"dep:opentelemetry",
"dep:tracing-flame",
"dep:tracing-opentelemetry",
"dep:opentelemetry_sdk",
"dep:opentelemetry-otlp",
]
perf_measurements = [
"dep:tracing-flame",
"otlp_telemetry",
"conduwuit-core/perf_measurements",
"conduwuit-core/sentry_telemetry",
]

View File

@@ -7,8 +7,10 @@
log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
result::UnwrapOrErr,
};
#[cfg(feature = "perf_measurements")]
#[cfg(feature = "otlp_telemetry")]
use opentelemetry::trace::TracerProvider;
#[cfg(feature = "otlp_telemetry")]
use opentelemetry_otlp::WithExportConfig;
use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};
#[cfg(feature = "perf_measurements")]
@@ -70,6 +72,57 @@ pub(crate) fn init(
subscriber.with(sentry_layer.with_filter(sentry_reload_filter))
};
#[cfg(feature = "otlp_telemetry")]
let subscriber = {
let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
.map_err(|e| err!(Config("otlp_filter", "{e}.")))?;
let otlp_layer = config.allow_otlp.then(|| {
opentelemetry::global::set_text_map_propagator(
opentelemetry_sdk::propagation::TraceContextPropagator::new(),
);
let exporter = match config.otlp_protocol.as_str() {
| "grpc" => opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_protocol(opentelemetry_otlp::Protocol::Grpc)
.build()
.expect("Failed to create OTLP gRPC exporter"),
| "http" => opentelemetry_otlp::SpanExporter::builder()
.with_http()
.build()
.expect("Failed to create OTLP HTTP exporter"),
| protocol => {
debug_warn!(
"Invalid OTLP protocol '{}', falling back to HTTP. Valid options are \
'http' or 'grpc'.",
protocol
);
opentelemetry_otlp::SpanExporter::builder()
.with_http()
.build()
.expect("Failed to create OTLP HTTP exporter")
},
};
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(exporter)
.build();
let tracer = provider.tracer(conduwuit_core::name());
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
let (otlp_reload_filter, otlp_reload_handle) =
reload::Layer::new(otlp_filter.clone());
reload_handles.add("otlp", Box::new(otlp_reload_handle));
Some(telemetry.with_filter(otlp_reload_filter))
});
subscriber.with(otlp_layer)
};
#[cfg(feature = "perf_measurements")]
let (subscriber, flame_guard) = {
let (flame_layer, flame_guard) = if config.tracing_flame {
@@ -89,35 +142,7 @@ pub(crate) fn init(
(None, None)
};
let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
.map_err(|e| err!(Config("otlp_filter", "{e}.")))?;
let otlp_layer = config.allow_otlp.then(|| {
opentelemetry::global::set_text_map_propagator(
opentelemetry_sdk::propagation::TraceContextPropagator::new(),
);
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_http()
.build()
.expect("Failed to create OTLP exporter");
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(exporter)
.build();
let tracer = provider.tracer(conduwuit_core::name());
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
let (otlp_reload_filter, otlp_reload_handle) =
reload::Layer::new(otlp_filter.clone());
reload_handles.add("otlp", Box::new(otlp_reload_handle));
Some(telemetry.with_filter(otlp_reload_filter))
});
let subscriber = subscriber.with(flame_layer).with(otlp_layer);
let subscriber = subscriber.with(flame_layer);
(subscriber, flame_guard)
};

View File

@@ -1,4 +1,4 @@
use std::{path::PathBuf, sync::Arc};
use std::sync::Arc;
use conduwuit_core::{
Error, Result,
@@ -38,14 +38,9 @@ pub(crate) fn new(
) -> Result<Arc<Self>, Error> {
let _runtime_guard = runtime.map(runtime::Handle::enter);
let config_paths = args
.config
.as_deref()
.into_iter()
.flat_map(<[_]>::iter)
.map(PathBuf::as_path);
let config_paths = args.config.clone().unwrap_or_default();
let config = Config::load(config_paths)
let config = Config::load(&config_paths)
.and_then(|raw| update(raw, args))
.and_then(|raw| Config::new(&raw))?;

View File

@@ -66,7 +66,10 @@ pub(crate) fn build(services: &Arc<Services>) -> Result<(Router, Guard)> {
.layer(RequestBodyTimeoutLayer::new(Duration::from_secs(
server.config.client_receive_timeout,
)))
.layer(TimeoutLayer::with_status_code(StatusCode::REQUEST_TIMEOUT, Duration::from_secs(server.config.client_request_timeout)))
.layer(TimeoutLayer::with_status_code(
StatusCode::REQUEST_TIMEOUT,
Duration::from_secs(server.config.client_request_timeout),
))
.layer(SetResponseHeaderLayer::if_not_present(
HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster
HeaderValue::from_static("?1"),

View File

@@ -9,7 +9,10 @@
use termimad::MadSkin;
use tokio::task::JoinHandle;
use crate::{Dep, admin};
use crate::{
Dep,
admin::{self, InvocationSource},
};
pub struct Console {
server: Arc<Server>,
@@ -160,7 +163,11 @@ async fn handle(self: Arc<Self>, line: String) {
}
async fn process(self: Arc<Self>, line: String) {
match self.admin.command_in_place(line, None).await {
match self
.admin
.command_in_place(line, None, InvocationSource::Console)
.await
{
| Ok(Some(ref content)) => self.output(content),
| Err(ref content) => self.output_err(content),
| _ => unreachable!(),

View File

@@ -2,6 +2,8 @@
use ruma::events::room::message::RoomMessageEventContent;
use tokio::time::{Duration, sleep};
use crate::admin::InvocationSource;
pub(super) const SIGNAL: &str = "SIGUSR2";
/// Possibly spawn the terminal console at startup if configured.
@@ -88,7 +90,10 @@ pub(super) async fn signal_execute(&self) -> Result {
async fn execute_command(&self, i: usize, command: String) -> Result {
debug!("Execute command #{i}: executing {command:?}");
match self.command_in_place(command, None).await {
match self
.command_in_place(command, None, InvocationSource::Console)
.await
{
| Ok(Some(output)) => Self::execute_command_output(i, &output),
| Err(output) => Self::execute_command_error(i, &output),
| Ok(None) => {

View File

@@ -1,6 +1,8 @@
use std::collections::BTreeMap;
use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder};
use conduwuit::{
Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder, warn,
};
use ruma::{
RoomId, UserId,
events::{
@@ -176,6 +178,19 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R
pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
use MembershipState::{Invite, Join, Knock, Leave};
if self
.services
.server
.config
.admins_list
.contains(&user_id.to_owned())
{
warn!(
"Revoking the admin status of {user_id} will not work correctly as they are within \
the admins_list config."
);
}
let Ok(room_id) = self.get_admin_room().await else {
return Err!(error!("No admin room available or created."));
};

View File

@@ -14,10 +14,10 @@
Error, Event, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder,
};
pub use create::create_admin_room;
use futures::{Future, FutureExt, TryFutureExt};
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use loole::{Receiver, Sender};
use ruma::{
Mxc, OwnedEventId, OwnedMxcUri, OwnedRoomId, RoomId, UInt, UserId,
Mxc, OwnedEventId, OwnedMxcUri, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId,
events::{
Mentions,
room::{
@@ -54,15 +54,37 @@ struct Services {
media: Dep<crate::media::Service>,
}
/// Inputs to a command are a multi-line string, optional reply_id, and optional
/// sender.
/// Inputs to a command are a multi-line string, invocation source, optional
/// reply_id, and optional sender.
#[derive(Debug)]
pub struct CommandInput {
pub command: String,
pub reply_id: Option<OwnedEventId>,
pub source: InvocationSource,
pub sender: Option<Box<UserId>>,
}
/// Where a command is being invoked from.
#[derive(Debug, Clone, Copy)]
pub enum InvocationSource {
/// The server's private admin room
AdminRoom,
/// An escaped `\!admin` command in a public room
EscapedCommand,
/// The server's admin console
Console,
/// Some other trusted internal source
Internal,
}
impl InvocationSource {
/// Returns whether this invocation source allows "restricted"
/// commands, i.e. ones that could be potentially dangerous if executed by
/// an attacker or in a public room.
#[must_use]
pub fn allows_restricted(&self) -> bool { !matches!(self, Self::EscapedCommand) }
}
/// Prototype of the tab-completer. The input is buffered text when tab
/// asserted; the output will fully replace the input buffer.
pub type Completer = fn(&str) -> String;
@@ -276,10 +298,15 @@ pub async fn text_to_file(&self, body: &str) -> Result<OwnedMxcUri> {
/// Posts a command to the command processor queue and returns. Processing
/// will take place on the service worker's task asynchronously. Errors if
/// the queue is full.
pub fn command(&self, command: String, reply_id: Option<OwnedEventId>) -> Result<()> {
pub fn command(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
) -> Result<()> {
self.channel
.0
.send(CommandInput { command, reply_id, sender: None })
.send(CommandInput { command, reply_id, source, sender: None })
.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
}
@@ -290,11 +317,17 @@ pub fn command_with_sender(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
sender: Box<UserId>,
) -> Result<()> {
self.channel
.0
.send(CommandInput { command, reply_id, sender: Some(sender) })
.send(CommandInput {
command,
reply_id,
source,
sender: Some(sender),
})
.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
}
@@ -304,8 +337,9 @@ pub async fn command_in_place(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
) -> ProcessorResult {
self.process_command(CommandInput { command, reply_id, sender: None })
self.process_command(CommandInput { command, reply_id, source, sender: None })
.await
}
@@ -349,16 +383,50 @@ async fn process_command(&self, command: CommandInput) -> ProcessorResult {
handle(services, command).await
}
/// Returns the list of admins for this server. First loads
/// the admin_list from the configuration, then adds users from
/// the admin room if applicable.
pub async fn get_admins(&self) -> Vec<OwnedUserId> {
let mut generated_admin_list: Vec<OwnedUserId> =
self.services.server.config.admins_list.clone();
if self.services.server.config.admins_from_room {
if let Ok(admin_room) = self.get_admin_room().await {
let admin_users = self.services.state_cache.room_members(&admin_room);
let mut stream = admin_users;
while let Some(user_id) = stream.next().await {
generated_admin_list.push(user_id.to_owned());
}
}
}
generated_admin_list
}
/// Checks whether a given user is an admin of this server
pub async fn user_is_admin(&self, user_id: &UserId) -> bool {
let Ok(admin_room) = self.get_admin_room().await else {
return false;
};
if self
.services
.server
.config
.admins_list
.contains(&user_id.to_owned())
{
return true;
}
self.services
.state_cache
.is_joined(user_id, &admin_room)
.await
if self.services.server.config.admins_from_room {
if let Ok(admin_room) = self.get_admin_room().await {
return self
.services
.state_cache
.is_joined(user_id, &admin_room)
.await;
}
}
false
}
/// Gets the room ID of the admin room
@@ -459,59 +527,59 @@ async fn handle_response_error(
Ok(())
}
pub async fn is_admin_command<E>(&self, event: &E, body: &str) -> bool
pub async fn is_admin_command<E>(&self, event: &E, body: &str) -> Option<InvocationSource>
where
E: Event + Send + Sync,
{
// Server-side command-escape with public echo
let is_escape = body.starts_with('\\');
let is_public_escape = is_escape && body.trim_start_matches('\\').starts_with("!admin");
// Admin command with public echo (in admin room)
let server_user = &self.services.globals.server_user;
let is_public_prefix =
body.starts_with("!admin") || body.starts_with(server_user.as_str());
// Expected backward branch
if !is_public_escape && !is_public_prefix {
return false;
}
let user_is_local = self.services.globals.user_is_local(event.sender());
// only allow public escaped commands by local admins
if is_public_escape && !user_is_local {
return false;
}
// Check if server-side command-escape is disabled by configuration
if is_public_escape && !self.services.server.config.admin_escape_commands {
return false;
}
// Prevent unescaped !admin from being used outside of the admin room
if event.room_id().is_some()
&& is_public_prefix
&& !self.is_admin_room(event.room_id().unwrap()).await
{
return false;
}
// Only senders who are admin can proceed
// If the user isn't an admin they definitely can't run admin commands
if !self.user_is_admin(event.sender()).await {
return false;
return None;
}
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as the server user
let emergency_password_set = self.services.server.config.emergency_password.is_some();
let from_server = event.sender() == server_user && !emergency_password_set;
if from_server && self.is_admin_room(event.room_id().unwrap()).await {
return false;
}
if let Some(room_id) = event.room_id()
&& self.is_admin_room(room_id).await
{
// This is a message in the admin room
// Authentic admin command
true
// Ignore messages which aren't admin commands
let server_user = &self.services.globals.server_user;
if !(body.starts_with("!admin") || body.starts_with(server_user.as_str())) {
return None;
}
// Ignore messages from the server user _unless_ the emergency password is set
let emergency_password_set = self.services.server.config.emergency_password.is_some();
if event.sender() == server_user && !emergency_password_set {
return None;
}
// Looks good
Some(InvocationSource::AdminRoom)
} else {
// This is a message outside the admin room
// Is it an escaped admin command? i.e. `\!admin --help`
let is_public_escape =
body.starts_with('\\') && body.trim_start_matches('\\').starts_with("!admin");
// Ignore the message if it's not
if !is_public_escape {
return None;
}
// Only admin users belonging to this server can use escaped commands
if !self.services.globals.user_is_local(event.sender()) {
return None;
}
// Check if escaped commands are disabled in the config
if !self.services.server.config.admin_escape_commands {
return None;
}
// Looks good
Some(InvocationSource::EscapedCommand)
}
}
#[must_use]

View File

@@ -47,6 +47,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
})?
.local_address(url_preview_bind_addr)
.dns_resolver(resolver.resolver.clone())
.timeout(Duration::from_secs(config.url_preview_timeout))
.redirect(redirect::Policy::limited(3))
.build()?,
@@ -68,6 +69,11 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
.dns_resolver(resolver.resolver.hooked.clone())
.connect_timeout(Duration::from_secs(config.federation_conn_timeout))
.read_timeout(Duration::from_secs(config.federation_timeout))
.timeout(Duration::from_secs(
config
.federation_timeout
.saturating_add(config.federation_conn_timeout),
))
.pool_max_idle_per_host(config.federation_idle_per_host.into())
.pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout))
.redirect(redirect::Policy::limited(3))
@@ -77,6 +83,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
.dns_resolver(resolver.resolver.hooked.clone())
.connect_timeout(Duration::from_secs(config.federation_conn_timeout))
.read_timeout(Duration::from_secs(305))
.timeout(Duration::from_secs(120))
.pool_max_idle_per_host(0)
.redirect(redirect::Policy::limited(3))
.build()?,
@@ -103,6 +110,8 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
pusher: base(config)?
.dns_resolver(resolver.resolver.clone())
.connect_timeout(Duration::from_secs(config.pusher_conn_timeout))
.timeout(Duration::from_secs(config.pusher_timeout))
.pool_max_idle_per_host(1)
.pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout))
.redirect(redirect::Policy::limited(2))

View File

@@ -1,4 +1,4 @@
use std::{iter, ops::Deref, path::Path, sync::Arc};
use std::{ops::Deref, path::PathBuf, sync::Arc};
use async_trait::async_trait;
use conduwuit::{
@@ -51,7 +51,8 @@ fn handle_reload(&self) -> Result {
])
.expect("failed to notify systemd of reloading state");
self.reload(iter::empty())?;
let config_paths = self.server.config.config_paths.clone().unwrap_or_default();
self.reload(&config_paths)?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
@@ -62,10 +63,7 @@ fn handle_reload(&self) -> Result {
}
#[implement(Service)]
pub fn reload<'a, I>(&self, paths: I) -> Result<Arc<Config>>
where
I: Iterator<Item = &'a Path>,
{
pub fn reload(&self, paths: &[PathBuf]) -> Result<Arc<Config>> {
let old = self.server.config.clone();
let new = Config::load(paths).and_then(|raw| Config::new(&raw))?;

View File

@@ -37,6 +37,10 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
}
async fn worker(self: Arc<Self>) -> Result {
if self.services.globals.is_read_only() {
return Ok(());
}
if self.services.config.ldap.enable {
warn!("emergency password feature not available with LDAP enabled.");
return Ok(());

View File

@@ -3,7 +3,7 @@
use bytes::Bytes;
use conduwuit::{
Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err,
error::inspect_debug_log, implement, trace, utils::string::EMPTY,
error::inspect_debug_log, implement, trace,
};
use http::{HeaderValue, header::AUTHORIZATION};
use ipaddress::IPAddress;
@@ -90,7 +90,9 @@ async fn perform<T>(
debug!(?method, ?url, "Sending request");
match client.execute(request).await {
| Ok(response) => handle_response::<T>(dest, actual, &method, &url, response).await,
| Ok(response) =>
self.handle_response::<T>(dest, actual, &method, &url, response)
.await,
| Err(error) =>
Err(handle_error(actual, &method, &url, error).expect_err("always returns error")),
}
@@ -119,7 +121,9 @@ fn validate_url(&self, url: &Url) -> Result<()> {
Ok(())
}
#[implement(super::Service)]
async fn handle_response<T>(
&self,
dest: &ServerName,
actual: &ActualDest,
method: &Method,
@@ -162,7 +166,6 @@ async fn into_http_response(
.expect("http::response::Builder is usable"),
);
// TODO: handle timeout
trace!("Waiting for response body...");
let body = response
.bytes()
@@ -286,10 +289,13 @@ fn into_http_request<T>(actual: &ActualDest, request: T) -> Result<http::Request
T: OutgoingRequest + Send,
{
const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_11];
const SATIR: SendAccessToken<'_> = SendAccessToken::IfRequired(EMPTY);
let http_request = request
.try_into_http_request::<Vec<u8>>(actual.string().as_str(), SATIR, &VERSIONS)
.try_into_http_request::<Vec<u8>>(
actual.string().as_str(),
SendAccessToken::None,
&VERSIONS,
)
.map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?;
Ok(http_request)

View File

@@ -171,4 +171,7 @@ pub fn user_is_local(&self, user_id: &UserId) -> bool {
pub fn server_is_ours(&self, server_name: &ServerName) -> bool {
server_name == self.server_name()
}
#[inline]
pub fn is_read_only(&self) -> bool { self.db.db.is_read_only() }
}

View File

@@ -198,7 +198,7 @@ pub async fn send_request<T>(&self, dest: &str, request: T) -> Result<T::Incomin
trace!("Push gateway destination: {dest}");
let http_request = request
.try_into_http_request::<BytesMut>(&dest, SendAccessToken::IfRequired(""), &VERSIONS)
.try_into_http_request::<BytesMut>(&dest, SendAccessToken::None, &VERSIONS)
.map_err(|e| {
err!(BadServerResponse(warn!(
"Failed to find destination {dest} for push gateway: {e}"
@@ -245,7 +245,7 @@ pub async fn send_request<T>(&self, dest: &str, request: T) -> Result<T::Incomin
.expect("http::response::Builder is usable"),
);
let body = response.bytes().await?; // TODO: handle timeout
let body = response.bytes().await?;
if !status.is_success() {
debug_warn!("Push gateway response body: {:?}", string_from_bytes(&body));
@@ -288,7 +288,7 @@ pub async fn send_push_notice<E>(
let mut notify = None;
let mut tweaks = Vec::new();
if event.room_id().is_none() {
// TODO(hydra): does this matter?
// This only affects v12+ create events
return Ok(());
}
@@ -427,20 +427,20 @@ async fn send_notice<E>(
}
let d = vec![device];
let mut notifi = Notification::new(d);
let mut notify = Notification::new(d);
notifi.event_id = Some(event.event_id().to_owned());
notifi.room_id = Some(event.room_id().unwrap().to_owned());
notify.event_id = Some(event.event_id().to_owned());
notify.room_id = Some(event.room_id().unwrap().to_owned());
if http
.data
.get("org.matrix.msc4076.disable_badge_count")
.is_none() && http.data.get("disable_badge_count").is_none()
{
notifi.counts = NotificationCounts::new(unread, uint!(0));
notify.counts = NotificationCounts::new(unread, uint!(0));
} else {
// counts will not be serialised if it's the default (0, 0)
// skip_serializing_if = "NotificationCounts::is_default"
notifi.counts = NotificationCounts::default();
notify.counts = NotificationCounts::default();
}
if !event_id_only {
@@ -449,30 +449,30 @@ async fn send_notice<E>(
.iter()
.any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)))
{
notifi.prio = NotificationPriority::High;
notify.prio = NotificationPriority::High;
} else {
notifi.prio = NotificationPriority::Low;
notify.prio = NotificationPriority::Low;
}
notifi.sender = Some(event.sender().to_owned());
notifi.event_type = Some(event.kind().to_owned());
notifi.content = serde_json::value::to_raw_value(event.content()).ok();
notify.sender = Some(event.sender().to_owned());
notify.event_type = Some(event.kind().to_owned());
notify.content = serde_json::value::to_raw_value(event.content()).ok();
if *event.kind() == TimelineEventType::RoomMember {
notifi.user_is_target =
notify.user_is_target =
event.state_key() == Some(event.sender().as_str());
}
notifi.sender_display_name =
notify.sender_display_name =
self.services.users.displayname(event.sender()).await.ok();
notifi.room_name = self
notify.room_name = self
.services
.state_accessor
.get_name(event.room_id().unwrap())
.await
.ok();
notifi.room_alias = self
notify.room_alias = self
.services
.state_accessor
.get_canonical_alias(event.room_id().unwrap())
@@ -480,7 +480,7 @@ async fn send_notice<E>(
.ok();
}
self.send_request(&http.url, send_event_notification::v1::Request::new(notifi))
self.send_request(&http.url, send_event_notification::v1::Request::new(notify))
.await?;
Ok(())

View File

@@ -3,14 +3,21 @@
//! This module implements a check against a room-specific policy server, as
//! described in the relevant Matrix spec proposal (see: https://github.com/matrix-org/matrix-spec-proposals/pull/4284).
use std::time::Duration;
use std::{collections::BTreeMap, time::Duration};
use conduwuit::{Err, Event, PduEvent, Result, debug, debug_info, implement, trace, warn};
use conduwuit::{
Err, Event, PduEvent, Result, debug, debug_error, debug_info, debug_warn, implement, trace,
warn,
};
use ruma::{
CanonicalJsonObject, RoomId, ServerName,
api::federation::room::policy::v1::Request as PolicyRequest,
CanonicalJsonObject, CanonicalJsonValue, KeyId, RoomId, ServerName, SigningKeyId,
api::federation::room::{
policy_check::unstable::Request as PolicyCheckRequest,
policy_sign::unstable::Request as PolicySignRequest,
},
events::{StateEventType, room::policy::RoomPolicyEventContent},
};
use serde_json::value::RawValue;
/// Asks a remote policy server if the event is allowed.
///
@@ -24,25 +31,18 @@
/// contacted for whatever reason, Err(e) is returned, which generally is a
/// fail-open operation.
#[implement(super::Service)]
#[tracing::instrument(skip_all, level = "debug")]
#[tracing::instrument(skip(self, pdu, pdu_json, room_id))]
pub async fn ask_policy_server(
&self,
pdu: &PduEvent,
pdu_json: &CanonicalJsonObject,
pdu_json: &mut CanonicalJsonObject,
room_id: &RoomId,
incoming: bool,
) -> Result<bool> {
if !self.services.server.config.enable_msc4284_policy_servers {
trace!("policy server checking is disabled");
return Ok(true); // don't ever contact policy servers
}
if self.services.server.config.policy_server_check_own_events
&& pdu.origin.is_some()
&& self
.services
.server
.is_ours(pdu.origin.as_ref().unwrap().as_str())
{
return Ok(true); // don't contact policy servers for locally generated events
}
if *pdu.event_type() == StateEventType::RoomPolicy.into() {
debug!(
@@ -52,16 +52,29 @@ pub async fn ask_policy_server(
);
return Ok(true);
}
let Ok(policyserver) = self
.services
.state_accessor
.room_state_get_content(room_id, &StateEventType::RoomPolicy, "")
.await
.inspect_err(|e| debug_error!("failed to load room policy server state event: {e}"))
.map(|c: RoomPolicyEventContent| c)
else {
debug!("room has no policy server configured");
return Ok(true);
};
if self.services.server.config.policy_server_check_own_events
&& !incoming
&& policyserver.public_key.is_none()
{
// don't contact policy servers for locally generated events, but only when the
// policy server does not require signatures
trace!("won't contact policy server for locally generated event");
return Ok(true);
}
let via = match policyserver.via {
| Some(ref via) => ServerName::parse(via)?,
| None => {
@@ -75,7 +88,6 @@ pub async fn ask_policy_server(
}
if !self.services.state_cache.server_in_room(via, room_id).await {
debug!(
room_id = %room_id,
via = %via,
"Policy server is not in the room, skipping spam check"
);
@@ -86,17 +98,43 @@ pub async fn ask_policy_server(
.sending
.convert_to_outgoing_federation_event(pdu_json.clone())
.await;
if policyserver.public_key.is_some() {
if !incoming {
debug_info!(
via = %via,
outgoing = ?pdu_json,
"Getting policy server signature on event"
);
return self
.fetch_policy_server_signature(pdu, pdu_json, via, outgoing, room_id)
.await;
}
// for incoming events, is it signed by <via> with the key
// "ed25519:policy_server"?
if let Some(CanonicalJsonValue::Object(sigs)) = pdu_json.get("signatures") {
if let Some(CanonicalJsonValue::Object(server_sigs)) = sigs.get(via.as_str()) {
let wanted_key_id: &KeyId<ruma::SigningKeyAlgorithm, ruma::Base64PublicKey> =
SigningKeyId::parse("ed25519:policy_server")?;
if let Some(CanonicalJsonValue::String(_sig_value)) =
server_sigs.get(wanted_key_id.as_str())
{
// TODO: verify signature
}
}
}
debug!(
"Event is not local and has no policy server signature, performing legacy spam check"
);
}
debug_info!(
room_id = %room_id,
via = %via,
outgoing = ?pdu_json,
"Checking event for spam with policy server"
"Checking event for spam with policy server via legacy check"
);
let response = tokio::time::timeout(
Duration::from_secs(self.services.server.config.policy_server_request_timeout),
self.services
.sending
.send_federation_request(via, PolicyRequest {
.send_federation_request(via, PolicyCheckRequest {
event_id: pdu.event_id().to_owned(),
pdu: Some(outgoing),
}),
@@ -142,3 +180,120 @@ pub async fn ask_policy_server(
Ok(true)
}
/// Asks a remote policy server for a signature on this event.
/// If the policy server signs this event, the original data is mutated.
///
/// Returns `Ok(true)` when the `ed25519:policy_server` signature was obtained
/// and inserted into `pdu_json["signatures"][via]`, `Ok(false)` when the
/// policy server declined to sign or returned an unusable response, and
/// `Err(_)` on network failure or timeout (callers generally treat errors as
/// fail-open, per the module docs).
#[implement(super::Service)]
#[tracing::instrument(skip_all, fields(event_id=%pdu.event_id(), via=%via))]
pub async fn fetch_policy_server_signature(
    &self,
    pdu: &PduEvent,
    pdu_json: &mut CanonicalJsonObject,
    via: &ServerName,
    outgoing: Box<RawValue>,
    room_id: &RoomId,
) -> Result<bool> {
    debug!("Requesting policy server signature");
    // Bound the federation request by the configured timeout so a slow or
    // unreachable policy server cannot stall event submission indefinitely.
    let timeout =
        Duration::from_secs(self.services.server.config.policy_server_request_timeout);
    let response = tokio::time::timeout(
        timeout,
        self.services
            .sending
            .send_federation_request(via, PolicySignRequest { pdu: outgoing }),
    )
    .await;
    let response = match response {
        | Ok(Ok(response)) => {
            debug!("Response from policy server: {:?}", response);
            response
        },
        | Ok(Err(e)) => {
            warn!(
                via = %via,
                event_id = %pdu.event_id(),
                room_id = %room_id,
                "Failed to contact policy server: {e}"
            );
            // Propagated to the caller, which treats policy-server errors as
            // non-fatal (fail-open) per the module documentation.
            return Err(e);
        },
        | Err(elapsed) => {
            // Report the configured timeout instead of a hardcoded duration:
            // the previous message claimed "10 seconds" regardless of the
            // `policy_server_request_timeout` setting.
            warn!(
                %via,
                event_id = %pdu.event_id(),
                %room_id,
                %elapsed,
                "Policy server request timed out after {timeout:?}"
            );
            return Err!("Request to policy server timed out");
        },
    };
    // No signatures object at all means the policy server refused to sign.
    let sigs: ruma::Signatures<ruma::OwnedServerName, ruma::ServerSigningKeyVersion> =
        match response.signatures {
            | Some(sigs) => sigs,
            | None => {
                debug!("Policy server refused to sign event");
                return Ok(false);
            },
        };
    // The response must carry a signature made by the policy server itself
    // (single lookup; the old contains_key + get().unwrap() pair is folded).
    let Some(keypairs) = sigs.get(via) else {
        debug_warn!(
            "Policy server returned signatures, but did not include the expected server name \
             '{}': {:?}",
            via,
            sigs
        );
        return Ok(false);
    };
    // ... and it must use the well-known policy-server key ID.
    let wanted_key_id = KeyId::parse("ed25519:policy_server")?;
    let Some(sig_value) = keypairs.get(wanted_key_id) else {
        debug_warn!(
            "Policy server returned signature, but did not use the key ID \
             'ed25519:policy_server'."
        );
        return Ok(false);
    };
    let sig_value = sig_value.to_owned();
    // Merge the signature into the event's `signatures` object, creating
    // intermediate objects as needed but never clobbering non-object values.
    let signatures_entry = pdu_json
        .entry("signatures".to_owned())
        .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default()));
    let CanonicalJsonValue::Object(signatures_map) = signatures_entry else {
        debug_warn!(
            "Existing `signatures` field is not an object; cannot insert policy signature"
        );
        return Ok(false);
    };
    match signatures_map.get_mut(via.as_str()) {
        | Some(CanonicalJsonValue::Object(inner_map)) => {
            trace!("inserting PS signature: {}", sig_value);
            inner_map.insert(
                "ed25519:policy_server".to_owned(),
                CanonicalJsonValue::String(sig_value),
            );
        },
        | Some(_) => {
            debug_warn!(
                "Existing `signatures[{}]` field is not an object; cannot insert policy \
                 signature",
                via
            );
            return Ok(false);
        },
        | None => {
            let mut inner = BTreeMap::new();
            inner.insert(
                "ed25519:policy_server".to_owned(),
                CanonicalJsonValue::String(sig_value.clone()),
            );
            trace!(
                "created new signatures object for {via} with the signature {}",
                sig_value
            );
            signatures_map.insert(via.as_str().to_owned(), CanonicalJsonValue::Object(inner));
        },
    }
    Ok(true)
}

View File

@@ -256,7 +256,12 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
if incoming_pdu.state_key.is_none() {
debug!(event_id = %incoming_pdu.event_id, "Checking policy server for event");
match self
.ask_policy_server(&incoming_pdu, &incoming_pdu.to_canonical_object(), room_id)
.ask_policy_server(
&incoming_pdu,
&mut incoming_pdu.to_canonical_object(),
room_id,
true,
)
.await
{
| Ok(false) => {

View File

@@ -0,0 +1,746 @@
use conduwuit::{Event, PduEvent, Result, err};
use ruma::{
UserId,
api::Direction,
events::relation::{BundledMessageLikeRelations, BundledReference, ReferenceChunk},
};
use crate::rooms::timeline::PdusIterItem;
/// Upper bound on relations fetched per event when building bundled
/// aggregations; keeps the DB scan and the resulting unsigned payload bounded.
const MAX_BUNDLED_RELATIONS: usize = 50;
impl super::Service {
    /// Gets bundled aggregations for an event according to the Matrix
    /// specification.
    /// - m.replace relations are bundled to include the most recent replacement
    ///   event.
    /// - m.reference relations are bundled to include a chunk of event IDs.
    ///
    /// Returns `Ok(None)` when the event cannot carry aggregations (state
    /// events, events that are themselves replacements) or when nothing is
    /// bundled; otherwise the populated relations container.
    #[tracing::instrument(skip(self), level = "debug")]
    pub async fn get_bundled_aggregations(
        &self,
        user_id: &UserId,
        pdu: &PduEvent,
    ) -> Result<Option<BundledMessageLikeRelations<Box<serde_json::value::RawValue>>>> {
        // Events that can never get bundled aggregations
        if pdu.state_key().is_some() || Self::is_replacement_event(pdu) {
            return Ok(None);
        }
        // Fetch up to MAX_BUNDLED_RELATIONS direct (depth 0) relations,
        // newest first.
        let relations = self
            .get_relations(
                user_id,
                &pdu.room_id_or_hash(),
                pdu.event_id(),
                conduwuit::PduCount::max(),
                MAX_BUNDLED_RELATIONS,
                0,
                Direction::Backward,
            )
            .await;
        // The relations database code still handles the basic unsigned data
        // We don't want to recursively fetch relations
        if relations.is_empty() {
            return Ok(None);
        }
        // Partition relations by type
        let (replace_events, reference_events): (Vec<_>, Vec<_>) = relations
            .iter()
            .filter_map(|relation| {
                // relation is a (PduCount, PduEvent) pair; .1 is the PDU.
                let pdu = &relation.1;
                let content = pdu.get_content_as_value();
                content
                    .get("m.relates_to")
                    .and_then(|relates_to| relates_to.get("rel_type"))
                    .and_then(|rel_type| rel_type.as_str())
                    .and_then(|rel_type_str| match rel_type_str {
                        | "m.replace" => Some(RelationType::Replace(relation)),
                        | "m.reference" => Some(RelationType::Reference(relation)),
                        | _ => None, /* Ignore other relation types (threads are in DB but not
                                      * handled here) */
                    })
            })
            .fold((Vec::new(), Vec::new()), |(mut replaces, mut references), rel_type| {
                match rel_type {
                    | RelationType::Replace(r) => replaces.push(r),
                    | RelationType::Reference(r) => references.push(r),
                }
                (replaces, references)
            });
        // If no relations to bundle, return None
        if replace_events.is_empty() && reference_events.is_empty() {
            return Ok(None);
        }
        let mut bundled = BundledMessageLikeRelations::<Box<serde_json::value::RawValue>>::new();
        // Handle m.replace relations - find the most recent valid one (lazy load
        // original event)
        if !replace_events.is_empty() {
            if let Some(replacement) = self
                .find_most_recent_valid_replacement(user_id, pdu, &replace_events)
                .await?
            {
                bundled.replace = Some(Self::serialize_replacement(replacement)?);
            }
        }
        // Handle m.reference relations - collect event IDs
        if !reference_events.is_empty() {
            let reference_chunk: Vec<_> = reference_events
                .into_iter()
                .map(|relation| BundledReference::new(relation.1.event_id().to_owned()))
                .collect();
            if !reference_chunk.is_empty() {
                bundled.reference = Some(Box::new(ReferenceChunk::new(reference_chunk)));
            }
        }
        // TODO: Handle other relation types (m.annotation, etc.) when specified
        Ok(Some(bundled))
    }

    /// Serialize a replacement event to the bundled format.
    ///
    /// The double `Box` matches the container's item type
    /// (`BundledMessageLikeRelations<Box<RawValue>>`, whose `replace` field
    /// holds an `Option<Box<_>>`).
    fn serialize_replacement(pdu: &PduEvent) -> Result<Box<Box<serde_json::value::RawValue>>> {
        let replacement_json = serde_json::to_string(pdu)
            .map_err(|e| err!(Database("Failed to serialize replacement event: {e}")))?;
        let raw_value = serde_json::value::RawValue::from_string(replacement_json)
            .map_err(|e| err!(Database("Failed to create RawValue: {e}")))?;
        Ok(Box::new(raw_value))
    }

    /// Find the most recent valid replacement event based on origin_server_ts
    /// and lexicographic event_id ordering
    async fn find_most_recent_valid_replacement<'a>(
        &self,
        user_id: &UserId,
        original_event: &PduEvent,
        replacement_events: &[&'a PdusIterItem],
    ) -> Result<Option<&'a PduEvent>> {
        // Filter valid replacements and find the maximum in a single pass
        let mut result: Option<&PduEvent> = None;
        for relation in replacement_events {
            let pdu = &relation.1;
            // Validate replacement
            if !Self::is_valid_replacement_event(original_event, pdu).await? {
                continue;
            }
            // `next` is Some only when this candidate strictly beats the
            // current best (later timestamp, or equal timestamp with a
            // lexicographically greater event_id).
            let next = match result {
                | None => Some(pdu),
                | Some(current) => {
                    // Compare by origin_server_ts first, then event_id lexicographically
                    match pdu.origin_server_ts().cmp(&current.origin_server_ts()) {
                        | std::cmp::Ordering::Greater => Some(pdu),
                        | std::cmp::Ordering::Equal if pdu.event_id() > current.event_id() =>
                            Some(pdu),
                        | _ => None,
                    }
                },
            };
            // NOTE(review): a newer-but-invisible (to user_id) candidate is
            // skipped rather than ending the search, so the latest *visible*
            // replacement wins — confirm this is the intended policy.
            if let Some(pdu) = next
                && self
                    .services
                    .state_accessor
                    .user_can_see_event(user_id, &pdu.room_id_or_hash(), pdu.event_id())
                    .await
            {
                result = Some(pdu);
            }
        }
        Ok(result)
    }

    /// Adds bundled aggregations to a PDU's unsigned field.
    ///
    /// Redacted events are left untouched; otherwise the computed
    /// aggregations (if any) are merged into `unsigned["m.relations"]`.
    #[tracing::instrument(skip(self, pdu), level = "debug")]
    pub async fn add_bundled_aggregations_to_pdu(
        &self,
        user_id: &UserId,
        pdu: &mut PduEvent,
    ) -> Result<()> {
        if pdu.is_redacted() {
            return Ok(());
        }
        let bundled_aggregations = self.get_bundled_aggregations(user_id, pdu).await?;
        if let Some(aggregations) = bundled_aggregations {
            let aggregations_json = serde_json::to_value(aggregations)
                .map_err(|e| err!(Database("Failed to serialize bundled aggregations: {e}")))?;
            Self::add_bundled_aggregations_to_unsigned(pdu, aggregations_json)?;
        }
        Ok(())
    }

    /// Helper method to add bundled aggregations to a PDU's unsigned field.
    ///
    /// Preserves any other keys already present in `unsigned`; entries in
    /// `aggregations_json` overwrite same-named keys under `m.relations`.
    /// Errors if the existing `unsigned` is not a JSON object or if
    /// `m.relations` exists but is not an object.
    fn add_bundled_aggregations_to_unsigned(
        pdu: &mut PduEvent,
        aggregations_json: serde_json::Value,
    ) -> Result<()> {
        use serde_json::{
            Map, Value as JsonValue,
            value::{RawValue as RawJsonValue, to_raw_value},
        };
        // Parse the existing unsigned blob, or start from an empty object.
        let mut unsigned: Map<String, JsonValue> = pdu
            .unsigned
            .as_deref()
            .map(RawJsonValue::get)
            .map_or_else(|| Ok(Map::new()), serde_json::from_str)
            .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?;
        let relations = unsigned
            .entry("m.relations")
            .or_insert_with(|| JsonValue::Object(Map::new()))
            .as_object_mut()
            .ok_or_else(|| err!(Database("m.relations is not an object")))?;
        if let JsonValue::Object(aggregations_map) = aggregations_json {
            relations.extend(aggregations_map);
        }
        pdu.unsigned = Some(to_raw_value(&unsigned)?);
        Ok(())
    }

    /// Validates that an event is acceptable as a replacement for another event
    /// See C/S spec "Validity of replacement events"
    #[tracing::instrument(level = "debug")]
    async fn is_valid_replacement_event(
        original_event: &PduEvent,
        replacement_event: &PduEvent,
    ) -> Result<bool> {
        Ok(
            // 1. Same room_id
            original_event.room_id() == replacement_event.room_id()
            // 2. Same sender
            && original_event.sender() == replacement_event.sender()
            // 3. Same type
            && original_event.event_type() == replacement_event.event_type()
            // 4. Neither event should have a state_key property
            && original_event.state_key().is_none()
            && replacement_event.state_key().is_none()
            // 5. Original event must not have rel_type of m.replace
            && !Self::is_replacement_event(original_event)
            // 6. Replacement event must have m.new_content property (skip for encrypted)
            && Self::has_new_content_or_encrypted(replacement_event),
        )
    }

    /// Check if an event is itself a replacement
    /// (i.e. its content carries `m.relates_to.rel_type == "m.replace"`).
    #[inline]
    fn is_replacement_event(event: &PduEvent) -> bool {
        event
            .get_content_as_value()
            .get("m.relates_to")
            .and_then(|relates_to| relates_to.get("rel_type"))
            .and_then(|rel_type| rel_type.as_str())
            .is_some_and(|rel_type| rel_type == "m.replace")
    }

    /// Check if event has m.new_content or is encrypted (where m.new_content
    /// would be in the encrypted payload)
    #[inline]
    fn has_new_content_or_encrypted(event: &PduEvent) -> bool {
        event.event_type() == &ruma::events::TimelineEventType::RoomEncrypted
            || event.get_content_as_value().get("m.new_content").is_some()
    }
}
/// Helper enum for partitioning relations by `rel_type` while iterating;
/// each variant borrows the underlying `(PduCount, PduEvent)` item.
enum RelationType<'a> {
    Replace(&'a PdusIterItem),
    Reference(&'a PdusIterItem),
}
#[cfg(test)]
mod tests {
    use conduwuit_core::pdu::{EventHash, PduEvent};
    use ruma::{UInt, events::TimelineEventType, owned_event_id, owned_room_id, owned_user_id};
    use serde_json::{Value as JsonValue, json, value::to_raw_value};

    // Builds a minimal m.room.message PDU; `unsigned_content`, when given,
    // is serialized into the PDU's `unsigned` field.
    fn create_test_pdu(unsigned_content: Option<JsonValue>) -> PduEvent {
        PduEvent {
            event_id: owned_event_id!("$test:example.com"),
            room_id: Some(owned_room_id!("!test:example.com")),
            sender: owned_user_id!("@test:example.com"),
            origin_server_ts: UInt::try_from(1_234_567_890_u64).unwrap(),
            kind: TimelineEventType::RoomMessage,
            content: to_raw_value(&json!({"msgtype": "m.text", "body": "test"})).unwrap(),
            state_key: None,
            prev_events: vec![],
            depth: UInt::from(1_u32),
            auth_events: vec![],
            redacts: None,
            unsigned: unsigned_content.map(|content| to_raw_value(&content).unwrap()),
            hashes: EventHash { sha256: "test_hash".to_owned() },
            signatures: None,
            origin: None,
        }
    }

    // Sample aggregations payload with one m.replace and one m.reference
    // entry, used across the unsigned-merging tests.
    fn create_bundled_aggregations() -> JsonValue {
        json!({
            "m.replace": {
                "event_id": "$replace:example.com",
                "origin_server_ts": 1_234_567_890,
                "sender": "@replacer:example.com"
            },
            "m.reference": {
                "count": 5,
                "chunk": [
                    "$ref1:example.com",
                    "$ref2:example.com"
                ]
            }
        })
    }

    // A PDU with no unsigned field should get one created containing
    // m.relations.
    #[test]
    fn test_add_bundled_aggregations_to_unsigned_no_existing_unsigned() {
        let mut pdu = create_test_pdu(None);
        let aggregations = create_bundled_aggregations();
        let result = super::super::Service::add_bundled_aggregations_to_unsigned(
            &mut pdu,
            aggregations.clone(),
        );
        assert!(result.is_ok(), "Should succeed when no unsigned field exists");
        assert!(pdu.unsigned.is_some(), "Unsigned field should be created");
        let unsigned_str = pdu.unsigned.as_ref().unwrap().get();
        let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap();
        assert!(unsigned.get("m.relations").is_some(), "m.relations should exist");
        assert_eq!(
            unsigned["m.relations"], aggregations,
            "Relations should match the aggregations"
        );
    }

    // New aggregations replace an existing entry of the same relation type
    // and add any new types alongside it.
    #[test]
    fn test_add_bundled_aggregations_to_unsigned_overwrite_same_relation_type() {
        let existing_unsigned = json!({
            "m.relations": {
                "m.replace": {
                    "event_id": "$old_replace:example.com",
                    "origin_server_ts": 1_111_111_111,
                    "sender": "@old_replacer:example.com"
                }
            }
        });
        let mut pdu = create_test_pdu(Some(existing_unsigned));
        let new_aggregations = create_bundled_aggregations();
        let result = super::super::Service::add_bundled_aggregations_to_unsigned(
            &mut pdu,
            new_aggregations.clone(),
        );
        assert!(result.is_ok(), "Should succeed when overwriting same relation type");
        let unsigned_str = pdu.unsigned.as_ref().unwrap().get();
        let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap();
        let relations = &unsigned["m.relations"];
        assert_eq!(
            relations["m.replace"], new_aggregations["m.replace"],
            "m.replace should be updated"
        );
        assert_eq!(
            relations["m.replace"]["event_id"], "$replace:example.com",
            "Should have new event_id"
        );
        assert!(relations.get("m.reference").is_some(), "New m.reference should be added");
    }

    #[test]
    fn test_add_bundled_aggregations_to_unsigned_preserve_other_unsigned_fields() {
        // Test case: Other unsigned fields should be preserved
        let existing_unsigned = json!({
            "age": 98765,
            "prev_content": {"msgtype": "m.text", "body": "old message"},
            "redacted_because": {"event_id": "$redaction:example.com"},
            "m.relations": {
                "m.annotation": {"count": 1}
            }
        });
        let mut pdu = create_test_pdu(Some(existing_unsigned));
        let new_aggregations = json!({
            "m.replace": {"event_id": "$new:example.com"}
        });
        let result = super::super::Service::add_bundled_aggregations_to_unsigned(
            &mut pdu,
            new_aggregations,
        );
        assert!(result.is_ok(), "Should succeed while preserving other fields");
        let unsigned_str = pdu.unsigned.as_ref().unwrap().get();
        let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap();
        // Verify all existing fields are preserved
        assert_eq!(unsigned["age"], 98765, "age should be preserved");
        assert!(unsigned.get("prev_content").is_some(), "prev_content should be preserved");
        assert!(
            unsigned.get("redacted_because").is_some(),
            "redacted_because should be preserved"
        );
        // Verify relations were merged correctly
        let relations = &unsigned["m.relations"];
        assert!(
            relations.get("m.annotation").is_some(),
            "Existing m.annotation should be preserved"
        );
        assert!(relations.get("m.replace").is_some(), "New m.replace should be added");
    }

    #[test]
    fn test_add_bundled_aggregations_to_unsigned_invalid_existing_unsigned() {
        // Test case: unsigned that is not a JSON object should result in error
        let mut pdu = create_test_pdu(None);
        // Set unsigned to a JSON *string* (valid JSON, but not an object), so
        // parsing it as a Map fails.
        pdu.unsigned = Some(to_raw_value(&"invalid json").unwrap());
        let aggregations = create_bundled_aggregations();
        let result =
            super::super::Service::add_bundled_aggregations_to_unsigned(&mut pdu, aggregations);
        assert!(result.is_err(), "fails when existing unsigned is invalid");
        // Should we ignore the error and overwrite anyway?
    }

    // Test helper function to create test PDU events
    fn create_test_event(
        event_id: &str,
        room_id: &str,
        sender: &str,
        event_type: TimelineEventType,
        content: &JsonValue,
        state_key: Option<&str>,
    ) -> PduEvent {
        PduEvent {
            event_id: event_id.try_into().unwrap(),
            room_id: Some(room_id.try_into().unwrap()),
            sender: sender.try_into().unwrap(),
            origin_server_ts: UInt::try_from(1_234_567_890_u64).unwrap(),
            kind: event_type,
            content: to_raw_value(&content).unwrap(),
            state_key: state_key.map(Into::into),
            prev_events: vec![],
            depth: UInt::from(1_u32),
            auth_events: vec![],
            redacts: None,
            unsigned: None,
            hashes: EventHash { sha256: "test_hash".to_owned() },
            signatures: None,
            origin: None,
        }
    }

    /// Test that a valid replacement event passes validation
    #[tokio::test]
    async fn test_valid_replacement_event() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({"msgtype": "m.text", "body": "original message"}),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited message",
                "m.new_content": {
                    "msgtype": "m.text",
                    "body": "edited message"
                },
                "m.relates_to": {
                    "rel_type": "m.replace",
                    "event_id": "$original:example.com"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(result.unwrap(), "Valid replacement event should be accepted");
    }

    /// Test replacement event with different room ID is rejected
    #[tokio::test]
    async fn test_replacement_event_different_room() {
        let original = create_test_event(
            "$original:example.com",
            "!room1:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({"msgtype": "m.text", "body": "original message"}),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room2:example.com", // Different room
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited message",
                "m.new_content": {
                    "msgtype": "m.text",
                    "body": "edited message"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Different room ID should be rejected");
    }

    /// Test replacement event with different sender is rejected
    #[tokio::test]
    async fn test_replacement_event_different_sender() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user1:example.com",
            TimelineEventType::RoomMessage,
            &json!({"msgtype": "m.text", "body": "original message"}),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user2:example.com", // Different sender
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited message",
                "m.new_content": {
                    "msgtype": "m.text",
                    "body": "edited message"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Different sender should be rejected");
    }

    /// Test replacement event with different type is rejected
    #[tokio::test]
    async fn test_replacement_event_different_type() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({"msgtype": "m.text", "body": "original message"}),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomTopic, // Different event type
            &json!({
                "topic": "new topic",
                "m.new_content": {
                    "topic": "new topic"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Different event type should be rejected");
    }

    /// Test replacement event with state key is rejected
    #[tokio::test]
    async fn test_replacement_event_with_state_key() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomName,
            &json!({"name": "room name"}),
            Some(""), // Has state key
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomName,
            &json!({
                "name": "new room name",
                "m.new_content": {
                    "name": "new room name"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Event with state key should be rejected");
    }

    /// Test replacement of an event that is already a replacement is rejected
    #[tokio::test]
    async fn test_replacement_event_original_is_replacement() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited message",
                "m.relates_to": {
                    "rel_type": "m.replace", // Original is already a replacement
                    "event_id": "$some_other:example.com"
                }
            }),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited again",
                "m.new_content": {
                    "msgtype": "m.text",
                    "body": "edited again"
                }
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Replacement of replacement should be rejected");
    }

    /// Test replacement event missing m.new_content is rejected
    #[tokio::test]
    async fn test_replacement_event_missing_new_content() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({"msgtype": "m.text", "body": "original message"}),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomMessage,
            &json!({
                "msgtype": "m.text",
                "body": "* edited message"
                // Missing m.new_content
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(!result.unwrap(), "Missing m.new_content should be rejected");
    }

    /// Test encrypted replacement event without m.new_content is accepted
    #[tokio::test]
    async fn test_replacement_event_encrypted_missing_new_content_is_valid() {
        let original = create_test_event(
            "$original:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomEncrypted,
            &json!({
                "algorithm": "m.megolm.v1.aes-sha2",
                "ciphertext": "encrypted_payload_base64",
                "sender_key": "sender_key",
                "session_id": "session_id"
            }),
            None,
        );
        let replacement = create_test_event(
            "$replacement:example.com",
            "!room:example.com",
            "@user:example.com",
            TimelineEventType::RoomEncrypted,
            &json!({
                "algorithm": "m.megolm.v1.aes-sha2",
                "ciphertext": "encrypted_replacement_payload_base64",
                "sender_key": "sender_key",
                "session_id": "session_id",
                "m.relates_to": {
                    "rel_type": "m.replace",
                    "event_id": "$original:example.com"
                }
                // No m.new_content in cleartext - this is valid for encrypted events
            }),
            None,
        );
        let result =
            super::super::Service::is_valid_replacement_event(&original, &replacement).await;
        assert!(result.is_ok(), "Validation should succeed");
        assert!(
            result.unwrap(),
            "Encrypted replacement without cleartext m.new_content should be accepted"
        );
    }
}

View File

@@ -3,7 +3,6 @@
use conduwuit::{
arrayvec::ArrayVec,
matrix::{Event, PduCount},
result::LogErr,
utils::{
ReadyExt,
stream::{TryIgnore, WidebandExt},
@@ -15,10 +14,11 @@
use ruma::{EventId, RoomId, UserId, api::Direction};
use crate::{
Dep, rooms,
Dep,
rooms::{
self,
short::{ShortEventId, ShortRoomId},
timeline::{PduId, RawPduId},
timeline::{PduId, PdusIterItem, RawPduId},
},
};
@@ -60,7 +60,7 @@ pub(super) fn get_relations<'a>(
target: ShortEventId,
from: PduCount,
dir: Direction,
) -> impl Stream<Item = (PduCount, impl Event)> + Send + 'a {
) -> impl Stream<Item = PdusIterItem> + Send + 'a {
// Query from exact position then filter excludes it (saturating_inc could skip
// events at min/max boundaries)
let from_unsigned = from.into_unsigned();
@@ -92,9 +92,7 @@ pub(super) fn get_relations<'a>(
let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?;
if pdu.sender() != user_id {
pdu.as_mut_pdu().remove_transaction_id().log_err().ok();
}
pdu.as_mut_pdu().set_unsigned(Some(user_id));
Some((shorteventid, pdu))
})

View File

@@ -1,15 +1,16 @@
mod bundled_aggregations;
mod data;
use std::sync::Arc;
use conduwuit::{
Result,
matrix::{Event, PduCount},
};
use conduwuit::{Result, matrix::PduCount};
use futures::{StreamExt, future::try_join};
use ruma::{EventId, RoomId, UserId, api::Direction};
use self::data::Data;
use crate::{Dep, rooms};
use crate::{
Dep,
rooms::{self, timeline::PdusIterItem},
};
pub struct Service {
services: Services,
@@ -19,6 +20,7 @@ pub struct Service {
struct Services {
short: Dep<rooms::short::Service>,
timeline: Dep<rooms::timeline::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
}
impl crate::Service for Service {
@@ -27,6 +29,8 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
},
db: Data::new(&args),
}))
@@ -56,7 +60,7 @@ pub async fn get_relations<'a>(
limit: usize,
max_depth: u8,
dir: Direction,
) -> Vec<(PduCount, impl Event)> {
) -> Vec<PdusIterItem> {
let room_id = self.services.short.get_shortroomid(room_id);
let target = self.services.timeline.get_pdu_count(target);

View File

@@ -1,9 +1,9 @@
use std::sync::Arc;
use conduwuit::{
PduCount, Result,
PduCount, PduEvent, Result,
arrayvec::ArrayVec,
implement,
debug_warn, implement,
matrix::event::{Event, Matches},
utils::{
ArrayVecExt, IterStream, ReadyExt, set,
@@ -35,6 +35,7 @@ struct Services {
short: Dep<rooms::short::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
timeline: Dep<rooms::timeline::Service>,
pdu_metadata: Dep<rooms::pdu_metadata::Service>,
}
#[derive(Clone, Debug)]
@@ -61,6 +62,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
pdu_metadata: args.depend::<rooms::pdu_metadata::Service>("rooms::pdu_metadata"),
},
}))
}
@@ -104,7 +106,8 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b
pub async fn search_pdus<'a>(
&'a self,
query: &'a RoomQuery<'a>,
) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + 'a)> {
sender_user: &'a UserId,
) -> Result<(usize, impl Stream<Item = PduEvent> + Send + 'a)> {
let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await;
let filter = &query.criteria.filter;
@@ -129,7 +132,23 @@ pub async fn search_pdus<'a>(
.then_some(pdu)
})
.skip(query.skip)
.take(query.limit);
.take(query.limit)
.map(move |mut pdu| {
pdu.set_unsigned(query.user_id);
pdu
})
.then(async move |mut pdu| {
if let Err(e) = self
.services
.pdu_metadata
.add_bundled_aggregations_to_pdu(sender_user, &mut pdu)
.await
{
debug_warn!("Failed to add bundled aggregations: {e}");
}
pdu
});
Ok((count, pdus))
}

View File

@@ -264,6 +264,8 @@ fn get_space_child_events<'a>(
if content.via.is_empty() {
return None;
}
} else {
return None;
}
if RoomId::parse(&state_key).is_err() {

View File

@@ -163,9 +163,7 @@ pub async fn threads_until<'a>(
let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?;
let pdu_id: PduId = pdu_id.into();
if pdu.sender() != user_id {
pdu.as_mut_pdu().remove_transaction_id().ok();
}
pdu.as_mut_pdu().set_unsigned(Some(user_id));
Some((pdu_id.shorteventid, pdu))
});

View File

@@ -335,10 +335,11 @@ pub async fn append_pdu<'a, Leaves>(
if let Some(body) = content.body {
self.services.search.index_pdu(shortroomid, &pdu_id, &body);
if self.services.admin.is_admin_command(pdu, &body).await {
if let Some(source) = self.services.admin.is_admin_command(pdu, &body).await {
self.services.admin.command_with_sender(
body,
Some((pdu.event_id()).into()),
source,
pdu.sender.clone().into(),
)?;
}
@@ -347,6 +348,10 @@ pub async fn append_pdu<'a, Leaves>(
| _ => {},
}
// CONCERN: If we receive events with a relation out-of-order, we never write
// their relation / thread. We need some kind of way to trigger when we receive
// this event, and potentially a way to rebuild the table entirely.
if let Ok(content) = pdu.get_content::<ExtractRelatesToEventId>() {
if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await {
self.services

View File

@@ -308,7 +308,7 @@ fn from_evt(
match self
.services
.event_handler
.ask_policy_server(&pdu, &pdu_json, pdu.room_id().expect("has room ID"))
.ask_policy_server(&pdu, &mut pdu_json, pdu.room_id().expect("has room ID"), false)
.await
{
| Ok(true) => {},

View File

@@ -1,13 +1,13 @@
use std::{borrow::Borrow, sync::Arc};
use std::sync::Arc;
use conduwuit::{
Err, PduCount, PduEvent, Result, at, err,
result::{LogErr, NotFound},
result::NotFound,
utils::{self, stream::TryReadyExt},
};
use database::{Database, Deserialized, Json, KeyVal, Map};
use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut};
use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction};
use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, api::Direction};
use super::{PduId, RawPduId};
use crate::{Dep, rooms, rooms::short::ShortRoomId};
@@ -45,12 +45,8 @@ pub(super) fn new(args: &crate::Args<'_>) -> Self {
}
#[inline]
pub(super) async fn last_timeline_count(
&self,
sender_user: Option<&UserId>,
room_id: &RoomId,
) -> Result<PduCount> {
let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max());
pub(super) async fn last_timeline_count(&self, room_id: &RoomId) -> Result<PduCount> {
let pdus_rev = self.pdus_rev(room_id, PduCount::max());
pin_mut!(pdus_rev);
let last_count = pdus_rev
@@ -64,12 +60,8 @@ pub(super) async fn last_timeline_count(
}
#[inline]
pub(super) async fn latest_pdu_in_room(
&self,
sender_user: Option<&UserId>,
room_id: &RoomId,
) -> Result<PduEvent> {
let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max());
pub(super) async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result<PduEvent> {
let pdus_rev = self.pdus_rev(room_id, PduCount::max());
pin_mut!(pdus_rev);
pdus_rev
@@ -221,7 +213,6 @@ pub(super) async fn replace_pdu(
/// order.
pub(super) fn pdus_rev<'a>(
&'a self,
user_id: Option<&'a UserId>,
room_id: &'a RoomId,
until: PduCount,
) -> impl Stream<Item = Result<PdusIterItem>> + Send + 'a {
@@ -231,14 +222,13 @@ pub(super) fn pdus_rev<'a>(
self.pduid_pdu
.rev_raw_stream_from(&current)
.ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix)))
.ready_and_then(move |item| Self::each_pdu(item, user_id))
.ready_and_then(Self::from_json_slice)
})
.try_flatten_stream()
}
pub(super) fn pdus<'a>(
&'a self,
user_id: Option<&'a UserId>,
room_id: &'a RoomId,
from: PduCount,
) -> impl Stream<Item = Result<PdusIterItem>> + Send + 'a {
@@ -248,21 +238,15 @@ pub(super) fn pdus<'a>(
self.pduid_pdu
.raw_stream_from(&current)
.ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix)))
.ready_and_then(move |item| Self::each_pdu(item, user_id))
.ready_and_then(Self::from_json_slice)
})
.try_flatten_stream()
}
fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> Result<PdusIterItem> {
fn from_json_slice((pdu_id, pdu): KeyVal<'_>) -> Result<PdusIterItem> {
let pdu_id: RawPduId = pdu_id.into();
let mut pdu = serde_json::from_slice::<PduEvent>(pdu)?;
if Some(pdu.sender.borrow()) != user_id {
pdu.remove_transaction_id().log_err().ok();
}
pdu.add_age().log_err().ok();
let pdu = serde_json::from_slice::<PduEvent>(pdu)?;
Ok((pdu_id.pdu_count(), pdu))
}

View File

@@ -20,7 +20,7 @@
};
use futures::{Future, Stream, TryStreamExt, pin_mut};
use ruma::{
CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomId, RoomId, UserId,
CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomId, RoomId,
events::room::encrypted::Relation,
};
use serde::Deserialize;
@@ -138,7 +138,7 @@ pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<impl Event> {
#[tracing::instrument(skip(self), level = "debug")]
pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, impl Event)> {
let pdus = self.pdus(None, room_id, None);
let pdus = self.pdus(room_id, None);
pin_mut!(pdus);
pdus.try_next()
@@ -148,16 +148,12 @@ pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, im
#[tracing::instrument(skip(self), level = "debug")]
pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result<impl Event> {
self.db.latest_pdu_in_room(None, room_id).await
self.db.latest_pdu_in_room(room_id).await
}
#[tracing::instrument(skip(self), level = "debug")]
pub async fn last_timeline_count(
&self,
sender_user: Option<&UserId>,
room_id: &RoomId,
) -> Result<PduCount> {
self.db.last_timeline_count(sender_user, room_id).await
pub async fn last_timeline_count(&self, room_id: &RoomId) -> Result<PduCount> {
self.db.last_timeline_count(room_id).await
}
/// Returns the `count` of this pdu's id.
@@ -235,33 +231,29 @@ pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObjec
#[inline]
pub fn all_pdus<'a>(
&'a self,
user_id: &'a UserId,
room_id: &'a RoomId,
) -> impl Stream<Item = PdusIterItem> + Send + 'a {
self.pdus(Some(user_id), room_id, None).ignore_err()
self.pdus(room_id, None).ignore_err()
}
/// Reverse iteration starting after `until`.
#[tracing::instrument(skip(self), level = "debug")]
pub fn pdus_rev<'a>(
&'a self,
user_id: Option<&'a UserId>,
room_id: &'a RoomId,
until: Option<PduCount>,
) -> impl Stream<Item = Result<PdusIterItem>> + Send + 'a {
self.db
.pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max))
.pdus_rev(room_id, until.unwrap_or_else(PduCount::max))
}
/// Forward iteration starting after `from`.
#[tracing::instrument(skip(self), level = "debug")]
pub fn pdus<'a>(
&'a self,
user_id: Option<&'a UserId>,
room_id: &'a RoomId,
from: Option<PduCount>,
) -> impl Stream<Item = Result<PdusIterItem>> + Send + 'a {
self.db
.pdus(user_id, room_id, from.unwrap_or_else(PduCount::min))
self.db.pdus(room_id, from.unwrap_or_else(PduCount::min))
}
}

View File

@@ -1,8 +1,7 @@
use std::{fmt::Debug, mem};
use bytes::BytesMut;
use conduwuit::{Err, Result, debug_error, err, trace, utils, warn};
use reqwest::Client;
use conduwuit::{Err, Result, debug_error, err, implement, trace, utils, warn};
use ruma::api::{
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration,
};
@@ -11,8 +10,9 @@
///
/// Only returns Ok(None) if there is no url specified in the appservice
/// registration file
pub(crate) async fn send_request<T>(
client: &Client,
#[implement(super::Service)]
pub async fn send_appservice_request<T>(
&self,
registration: Registration,
request: T,
) -> Result<Option<T::IncomingResponse>>
@@ -32,10 +32,10 @@ pub(crate) async fn send_request<T>(
trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id);
let hs_token = registration.hs_token.as_str();
let mut http_request = request
let http_request = request
.try_into_http_request::<BytesMut>(
&dest,
SendAccessToken::IfRequired(hs_token),
SendAccessToken::Appservice(hs_token),
&VERSIONS,
)
.map_err(|e| {
@@ -45,19 +45,10 @@ pub(crate) async fn send_request<T>(
})?
.map(BytesMut::freeze);
let mut parts = http_request.uri().clone().into_parts();
let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned();
let symbol = if old_path_and_query.contains('?') { "&" } else { "?" };
parts.path_and_query = Some(
(old_path_and_query + symbol + "access_token=" + hs_token)
.parse()
.unwrap(),
);
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
let reqwest_request = reqwest::Request::try_from(http_request)?;
let client = &self.services.client.appservice;
let mut response = client.execute(reqwest_request).await.map_err(|e| {
warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id);
e
@@ -75,7 +66,7 @@ pub(crate) async fn send_request<T>(
.expect("http::response::Builder is usable"),
);
let body = response.bytes().await?; // TODO: handle timeout
let body = response.bytes().await?;
if !status.is_success() {
debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body));

View File

@@ -18,10 +18,7 @@
warn,
};
use futures::{FutureExt, Stream, StreamExt};
use ruma::{
RoomId, ServerName, UserId,
api::{OutgoingRequest, appservice::Registration},
};
use ruma::{RoomId, ServerName, UserId, api::OutgoingRequest};
use tokio::{task, task::JoinSet};
use self::data::Data;
@@ -318,22 +315,6 @@ pub async fn send_synapse_request<T>(
.await
}
/// Sends a request to an appservice
///
/// Only returns None if there is no url specified in the appservice
/// registration file
pub async fn send_appservice_request<T>(
&self,
registration: Registration,
request: T,
) -> Result<Option<T::IncomingResponse>>
where
T: OutgoingRequest + Debug + Send,
{
let client = &self.services.client.appservice;
appservice::send_request(client, registration, request).await
}
/// Clean up queued sending event data
///
/// Used after we remove an appservice registration or a user deletes a push

View File

@@ -50,9 +50,7 @@
};
use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
use super::{
Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem,
};
use super::{Destination, EduBuf, EduVec, Msg, SendingEvent, Service, data::QueueItem};
#[derive(Debug)]
enum TransactionStatus {
@@ -720,18 +718,18 @@ async fn send_events_dest_appservice(
//debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty
// transaction");
let client = &self.services.client.appservice;
match appservice::send_request(
client,
appservice,
ruma::api::appservice::event::push_events::v1::Request {
events: pdu_jsons,
txn_id: txn_id.into(),
ephemeral: edu_jsons,
to_device: Vec::new(), // TODO
},
)
.await
match self
.send_appservice_request(
appservice,
ruma::api::appservice::event::push_events::v1::Request {
events: pdu_jsons,
txn_id: txn_id.into(),
ephemeral: edu_jsons,
to_device: Vec::new(), // TODO
},
)
.await
{
| Ok(_) => Ok(Destination::Appservice(id)),
| Err(e) => Err((Destination::Appservice(id), e)),
@@ -781,7 +779,7 @@ async fn send_events_dest_push(
for pdu in pdus {
// Redacted events are not notification targets (we don't send push for them)
if pdu.contains_unsigned_property("redacted_because", serde_json::Value::is_string) {
if pdu.is_redacted() {
continue;
}

View File

@@ -130,7 +130,7 @@ pub async fn start(self: &Arc<Self>) -> Result<Arc<Self>> {
// reset dormant online/away statuses to offline, and set the server user as
// online
if self.server.config.allow_local_presence {
if self.server.config.allow_local_presence && !self.db.is_read_only() {
self.presence.unset_all_presence().await;
_ = self
.presence
@@ -146,7 +146,7 @@ pub async fn stop(&self) {
info!("Shutting down services...");
// set the server user as offline
if self.server.config.allow_local_presence {
if self.server.config.allow_local_presence && !self.db.is_read_only() {
_ = self
.presence
.ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Offline)

4
towncrier.toml Normal file
View File

@@ -0,0 +1,4 @@
[tool.towncrier]
name = "Continuwuity"
directory = "changelog.d"
filename = "CHANGELOG.md"