Compare commits

..

1 Commits

Author SHA1 Message Date
Jade Ellis
7a8c409ff9 refactor: Use snafu
This should greatly improve the debugging experience by allowing
tracking backtraces to get more context from the error.
A lot of the touched files here are just cleaning up the old way of
creating errors.
2026-02-22 14:06:15 +00:00
107 changed files with 1270 additions and 1886 deletions

View File

@@ -30,22 +30,22 @@ jobs:
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
#- name: Work around llvm-project#153385
# id: llvm-workaround
# run: |
# if [ -f /usr/share/apt/default-sequoia.config ]; then
# echo "Applying workaround for llvm-project#153385"
# mkdir -p /etc/crypto-policies/back-ends/
# cp /usr/share/apt/default-sequoia.config /etc/crypto-policies/back-ends/apt-sequoia.config
# sed -i 's/\(sha1\.second_preimage_resistance = \)2026-02-01/\12026-06-01/' /etc/crypto-policies/back-ends/apt-sequoia.config
# else
# echo "No workaround needed for llvm-project#153385"
# fi
- name: Work around llvm-project#153385
id: llvm-workaround
run: |
if [ -f /usr/share/apt/default-sequoia.config ]; then
echo "Applying workaround for llvm-project#153385"
mkdir -p /etc/crypto-policies/back-ends/
cp /usr/share/apt/default-sequoia.config /etc/crypto-policies/back-ends/apt-sequoia.config
sed -i 's/\(sha1\.second_preimage_resistance = \)2026-02-01/\12026-06-01/' /etc/crypto-policies/back-ends/apt-sequoia.config
else
echo "No workaround needed for llvm-project#153385"
fi
- name: Pick compatible clang version
id: clang-version
run: |
# both latest need to use clang-23, but oldstable and previous can just use clang
if [[ "${{ matrix.container }}" == "ubuntu-latest" ]]; then
if [[ "${{ matrix.container }}" == "ubuntu-latest" || "${{ matrix.container }}" == "debian-latest" ]]; then
echo "Using clang-23 package for ${{ matrix.container }}"
echo "version=clang-23" >> $GITHUB_OUTPUT
else

View File

@@ -1,32 +1,3 @@
# Continuwuity 0.5.6 (2026-03-03)
## Security
- Admin escape commands received over federation will never be executed, as this is never valid in a genuine situation. Contributed by @Jade.
- Fixed data amplification vulnerability (CWE-409) that affected configurations with server-side compression enabled (non-default). Contributed by @nex.
## Features
- Outgoing presence is now disabled by default, and the config option documentation has been adjusted to more accurately represent the weight of presence, typing indicators, and read receipts. Contributed by @nex. ([#1399](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1399))
- Improved the concurrency handling of federation transactions, vastly improving performance and reliability by more accurately handling inbound transactions and reducing the amount of repeated wasted work. Contributed by @nex and @Jade. ([#1428](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1428))
- Added [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) Device masquerading (not all of MSC3202). This should fix issues with enabling [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) for some Mautrix bridges. Contributed by @Jade ([#1435](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1435))
- Added [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814) Dehydrated Devices - you can now decrypt messages sent while all devices were logged out. ([#1436](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1436))
- Implement [MSC4143](https://github.com/matrix-org/matrix-spec-proposals/pull/4143) MatrixRTC transport discovery endpoint. Move RTC foci configuration from `[global.well_known]` to a new `[global.matrix_rtc]` section with a `foci` field. Contributed by @0xnim ([#1442](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1442))
- Updated `list-backups` admin command to output one backup per line. ([#1394](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1394))
- Improved URL preview fetching with a more compatible user agent for sites like YouTube Music. Added `!admin media delete-url-preview <url>` command to clear cached URL previews that were stuck and broken. ([#1434](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1434))
## Bugfixes
- Removed non-compliant and non-functional room alias lookups over federation. Contributed by @nex ([#1393](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1393))
- Removed ability to set rocksdb as read only. Doing so would cause unintentional and buggy behaviour. Contributed by @Terryiscool160. ([#1418](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1418))
- Fixed a startup crash in the sender service if we can't detect the number of CPU cores, even if the `sender_workers` config option is set correctly. Contributed by @katie. ([#1421](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1421))
- Removed the `allow_public_room_directory_without_auth` config option. Contributed by @0xnim. ([#1441](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1441))
- Fixed sliding sync v5 list ranges always starting from 0, causing extra rooms to be unnecessarily processed and returned. Contributed by @0xnim ([#1445](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1445))
- Fixed a bug that (repairably) caused a room split between continuwuity and non-continuwuity servers when the room had both `m.room.policy` and `org.matrix.msc4284.policy` in its room state. Contributed by @nex ([#1481](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1481))
- Fixed `!admin media delete --mxc <url>` responding with an error message when the media was deleted successfully. Contributed by @lynxize
- Fixed spurious 404 media errors in the logs. Contributed by @benbot.
- Fixed spurious warn about needed backfill via federation for non-federated rooms. Contributed by @kraem.
# Continuwuity v0.5.5 (2026-02-15)
## Features

98
Cargo.lock generated
View File

@@ -445,14 +445,13 @@ dependencies = [
[[package]]
name = "axum-extra"
version = "0.12.5"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fef252edff26ddba56bbcdf2ee3307b8129acb86f5749b68990c168a6fcc9c76"
checksum = "9963ff19f40c6102c76756ef0a46004c0d58957d87259fc9208ff8441c12ab96"
dependencies = [
"axum",
"axum-core",
"bytes",
"futures-core",
"futures-util",
"headers",
"http",
@@ -460,6 +459,8 @@ dependencies = [
"http-body-util",
"mime",
"pin-project-lite",
"rustversion",
"serde_core",
"tower-layer",
"tower-service",
"tracing",
@@ -887,7 +888,7 @@ dependencies = [
[[package]]
name = "conduwuit"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"clap",
"conduwuit_admin",
@@ -919,7 +920,7 @@ dependencies = [
[[package]]
name = "conduwuit_admin"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"clap",
"conduwuit_api",
@@ -940,7 +941,7 @@ dependencies = [
[[package]]
name = "conduwuit_api"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"async-trait",
"axum",
@@ -972,14 +973,14 @@ dependencies = [
[[package]]
name = "conduwuit_build_metadata"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"built",
]
[[package]]
name = "conduwuit_core"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"argon2",
"arrayvec",
@@ -1013,6 +1014,7 @@ dependencies = [
"nix",
"num-traits",
"parking_lot",
"paste",
"rand 0.10.0",
"rand_core 0.6.4",
"regex",
@@ -1026,7 +1028,7 @@ dependencies = [
"serde_regex",
"smallstr",
"smallvec",
"thiserror 2.0.18",
"snafu",
"tikv-jemalloc-ctl",
"tikv-jemalloc-sys",
"tikv-jemallocator",
@@ -1041,7 +1043,7 @@ dependencies = [
[[package]]
name = "conduwuit_database"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"async-channel",
"conduwuit_core",
@@ -1059,7 +1061,7 @@ dependencies = [
[[package]]
name = "conduwuit_macros"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"itertools 0.14.0",
"proc-macro2",
@@ -1069,7 +1071,7 @@ dependencies = [
[[package]]
name = "conduwuit_router"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"axum",
"axum-client-ip",
@@ -1103,7 +1105,7 @@ dependencies = [
[[package]]
name = "conduwuit_service"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"askama",
"async-trait",
@@ -1145,7 +1147,7 @@ dependencies = [
[[package]]
name = "conduwuit_web"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"askama",
"axum",
@@ -1153,7 +1155,7 @@ dependencies = [
"conduwuit_service",
"futures",
"rand 0.10.0",
"thiserror 2.0.18",
"snafu",
"tracing",
]
@@ -1221,7 +1223,7 @@ dependencies = [
[[package]]
name = "continuwuity-admin-api"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"ruma-common",
"serde",
@@ -1600,7 +1602,7 @@ dependencies = [
[[package]]
name = "draupnir-antispam"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"ruma-common",
"serde",
@@ -3002,7 +3004,7 @@ checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "meowlnir-antispam"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"ruma-common",
"serde",
@@ -4055,14 +4057,12 @@ dependencies = [
"sync_wrapper",
"tokio",
"tokio-rustls",
"tokio-util",
"tower",
"tower-http",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"wasm-streams",
"web-sys",
"webpki-roots",
]
@@ -4096,7 +4096,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.10.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"assign",
"continuwuity-admin-api",
@@ -4119,7 +4119,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.10.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"js_int",
"ruma-common",
@@ -4131,7 +4131,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.18.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"as_variant",
"assign",
@@ -4154,7 +4154,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"as_variant",
"base64 0.22.1",
@@ -4186,7 +4186,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.28.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"as_variant",
"indexmap",
@@ -4211,7 +4211,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"bytes",
"headers",
@@ -4233,7 +4233,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.9.5"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"js_int",
"thiserror 2.0.18",
@@ -4242,7 +4242,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"js_int",
"ruma-common",
@@ -4252,7 +4252,7 @@ dependencies = [
[[package]]
name = "ruma-macros"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"cfg-if",
"proc-macro-crate",
@@ -4267,7 +4267,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"js_int",
"ruma-common",
@@ -4279,7 +4279,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.15.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=bb12ed288a31a23aa11b10ba0fad22b7f985eb88#bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=e087ff15888156942ca2ffe6097d1b4c3fd27628#e087ff15888156942ca2ffe6097d1b4c3fd27628"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",
@@ -4912,6 +4912,27 @@ dependencies = [
"serde",
]
[[package]]
name = "snafu"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2"
dependencies = [
"snafu-derive",
]
[[package]]
name = "snafu-derive"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "socket2"
version = "0.5.10"
@@ -5870,19 +5891,6 @@ dependencies = [
"wasmparser",
]
[[package]]
name = "wasm-streams"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
dependencies = [
"futures-util",
"js-sys",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "wasmparser"
version = "0.244.0"
@@ -6347,7 +6355,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "0.5.6"
version = "0.5.5"
dependencies = [
"askama",
"cargo_metadata",

View File

@@ -12,7 +12,7 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
version = "0.5.6"
version = "0.5.5"
[workspace.metadata.crane]
name = "conduwuit"
@@ -97,7 +97,7 @@ features = [
]
[workspace.dependencies.axum-extra]
version = "0.12.0"
version = "0.10.1"
default-features = false
features = ["typed-header", "tracing"]
@@ -144,7 +144,6 @@ features = [
"socks",
"hickory-dns",
"http2",
"stream",
]
[workspace.dependencies.serde]
@@ -308,9 +307,14 @@ features = [
]
# Used for conduwuit::Error type
[workspace.dependencies.thiserror]
version = "2.0.12"
[workspace.dependencies.snafu]
version = "0.8"
default-features = false
features = ["std", "rust_1_81"]
# Used for macro name generation
[workspace.dependencies.paste]
version = "1.0"
# Used when hashing the state
[workspace.dependencies.ring]
@@ -344,7 +348,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
rev = "e087ff15888156942ca2ffe6097d1b4c3fd27628"
features = [
"compat",
"rand",
@@ -364,7 +368,6 @@ features = [
"unstable-msc2870",
"unstable-msc3026",
"unstable-msc3061",
"unstable-msc3814",
"unstable-msc3245",
"unstable-msc3266",
"unstable-msc3381", # polls
@@ -383,7 +386,6 @@ features = [
"unstable-pdu",
"unstable-msc4155",
"unstable-msc4143", # livekit well_known response
"unstable-msc4284"
]
[workspace.dependencies.rust-rocksdb]

View File

@@ -57,15 +57,10 @@ ### What are the project's goals?
### Can I try it out?
Check out the [documentation](https://continuwuity.org) for installation instructions.
Check out the [documentation](https://continuwuity.org) for installation instructions, or join one of these vetted public homeservers running Continuwuity to get a feel for things!
If you want to try it out as a user, we have some partnered homeservers you can use:
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other matrix chat client you wish elsewhere.
* Your username for matrix will be in the form of `@username:federated.nexus`, however you can simply use the `username` part to log in. Your password is your password.
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another matrix client *that supports registration*.
- https://continuwuity.rocks -- A public demo server operated by the Continuwuity Team.
- https://federated.nexus -- Federated Nexus is a community resource hosting multiple FOSS (especially federated) services, including Matrix and Forgejo.
### What are we working on?

1
changelog.d/1393.bugfix Normal file
View File

@@ -0,0 +1 @@
Removed non-compliant and non-functional room alias lookups over federation. Contributed by @nex

1
changelog.d/1399.feature Normal file
View File

@@ -0,0 +1 @@
Outgoing presence is now disabled by default, and the config option documentation has been adjusted to more accurately represent the weight of presence, typing indicators, and read receipts. Contributed by @nex.

1
changelog.d/1418.bugfix Normal file
View File

@@ -0,0 +1 @@
Removed ability to set rocksdb as read only. Doing so would cause unintentional and buggy behaviour. Contributed by @Terryiscool160.

1
changelog.d/1421.bugfix Normal file
View File

@@ -0,0 +1 @@
Fixed a startup crash in the sender service if we can't detect the number of CPU cores, even if the `sender_workers` config option is set correctly. Contributed by @katie.

View File

@@ -0,0 +1 @@
Updated `list-backups` admin command to output one backup per line.

View File

@@ -15,18 +15,6 @@ disallowed-macros = [
{ path = "log::trace", reason = "use conduwuit_core::trace" },
]
[[disallowed-methods]]
path = "tokio::spawn"
reason = "use and pass conduwuit_core::server::Server::runtime() to spawn from"
[[disallowed-methods]]
path = "reqwest::Response::bytes"
reason = "bytes is unsafe, use limit_read via the conduwuit_core::utils::LimitReadExt trait instead"
[[disallowed-methods]]
path = "reqwest::Response::text"
reason = "text is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
[[disallowed-methods]]
path = "reqwest::Response::json"
reason = "json is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
disallowed-methods = [
{ path = "tokio::spawn", reason = "use and pass conduwuit_core::server::Server::runtime() to spawn from" },
]

View File

@@ -9,6 +9,7 @@ address = "0.0.0.0"
allow_device_name_federation = true
allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_public_room_directory_without_auth = true
allow_registration = true
database_path = "/database"
log = "trace,h2=debug,hyper=debug"

View File

@@ -290,25 +290,6 @@
#
#max_fetch_prev_events = 192
# How many incoming federation transactions the server is willing to be
# processing at any given time before it becomes overloaded and starts
# rejecting further transactions until some slots become available.
#
# Setting this value too low or too high may result in unstable
# federation, and setting it too high may cause runaway resource usage.
#
#max_concurrent_inbound_transactions = 150
# Maximum age (in seconds) for cached federation transaction responses.
# Entries older than this will be removed during cleanup.
#
#transaction_id_cache_max_age_secs = 7200 (2 hours)
# Maximum number of cached federation transaction responses.
# When the cache exceeds this limit, older entries will be removed.
#
#transaction_id_cache_max_entries = 8192
# Default/base connection timeout (seconds). This is used only by URL
# previews and update/news endpoint checks.
#
@@ -546,6 +527,12 @@
#
#allow_public_room_directory_over_federation = false
# Set this to true to allow your server's public room directory to be
# queried without client authentication (access token) through the Client
# APIs. Set this to false to protect against /publicRooms spiders.
#
#allow_public_room_directory_without_auth = false
# Allow guests/unauthenticated users to access TURN credentials.
#
# This is the equivalent of Synapse's `turn_allow_guests` config option.
@@ -1844,13 +1831,14 @@
#
#support_mxid =
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
#
# A list of MatrixRTC foci URLs which will be served as part of the
# MSC4143 client endpoint at /.well-known/matrix/client.
# MSC4143 client endpoint at /.well-known/matrix/client. If you're
# setting up livekit, you'd want something like:
# rtc_focus_server_urls = [
# { type = "livekit", livekit_service_url = "https://livekit.example.com" },
# ]
#
# This option is deprecated and will be removed in a future release.
# Please migrate to the new `[global.matrix_rtc]` config section.
# To disable, set this to be an empty vector (`[]`).
#
#rtc_focus_server_urls = []
@@ -1872,23 +1860,6 @@
#
#blurhash_max_raw_size = 33554432
[global.matrix_rtc]
# A list of MatrixRTC foci (transports) which will be served via the
# MSC4143 RTC transports endpoint at
# `/_matrix/client/v1/rtc/transports`. If you're setting up livekit,
# you'd want something like:
# ```toml
# [global.matrix_rtc]
# foci = [
# { type = "livekit", livekit_service_url = "https://livekit.example.com" },
# ]
# ```
#
# To disable, set this to an empty list (`[]`).
#
#foci = []
[global.ldap]
# Whether to enable LDAP login.

View File

@@ -180,11 +180,6 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
export RUSTFLAGS="${RUSTFLAGS}"
fi
RUST_PROFILE_DIR="${RUST_PROFILE}"
if [[ "${RUST_PROFILE}" == "dev" ]]; then
RUST_PROFILE_DIR="debug"
fi
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".target_directory"))
mkdir /out/sbin
@@ -196,8 +191,8 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY /out/sbin/$BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY /out/sbin/$BINARY
done
EOF

View File

@@ -78,19 +78,47 @@ #### Firewall hints
### 3. Telling clients where to find LiveKit
To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to the `[global.matrix_rtc]` config section using the `foci` option.
To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to your client .well-known file. To do so, in the config section `global.well-known`, add (or modify) the option `rtc_focus_server_urls`.
The variable should be a list of servers serving as MatrixRTC endpoints. Clients discover these via the `/_matrix/client/v1/rtc/transports` endpoint (MSC4143).
The variable should be a list of servers serving as MatrixRTC endpoints to serve in the well-known file to the client.
```toml
[global.matrix_rtc]
foci = [
rtc_focus_server_urls = [
{ type = "livekit", livekit_service_url = "https://livekit.example.com" },
]
```
Remember to replace the URL with the address you are deploying your instance of lk-jwt-service to.
#### Serving .well-known manually
If you don't let Continuwuity serve your `.well-known` files, you need to add the following lines to your `.well-known/matrix/client` file, remembering to replace the URL with your own `lk-jwt-service` deployment:
```json
"org.matrix.msc4143.rtc_foci": [
{
"type": "livekit",
"livekit_service_url": "https://livekit.example.com"
}
]
```
The final file should look something like this:
```json
{
"m.homeserver": {
"base_url":"https://matrix.example.com"
},
"org.matrix.msc4143.rtc_foci": [
{
"type": "livekit",
"livekit_service_url": "https://livekit.example.com"
}
]
}
```
### 4. Configure your Reverse Proxy
Reverse proxies can be configured in many different ways - so we can't provide a step by step for this.

View File

@@ -51,13 +51,7 @@ ## Can I try it out?
Check out the [documentation](https://continuwuity.org) for installation instructions.
If you want to try it out as a user, we have some partnered homeservers you can use:
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other matrix chat client you wish elsewhere.
* Your username for matrix will be in the form of `@username:federated.nexus`, however you can simply use the `username` part to log in. Your password is your password.
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another matrix client *that supports registration*.
There are currently no open registration continuwuity instances available.
## What are we working on?

View File

@@ -36,7 +36,3 @@ ## `!admin media delete-all-from-user`
## `!admin media delete-all-from-server`
Deletes all remote media from the specified remote server. This will always ignore errors by default
## `!admin media delete-url-preview`
Deletes a cached URL preview, forcing it to be re-fetched. Use --all to purge all cached URL previews

View File

@@ -1,6 +1,6 @@
use std::fmt::Write;
use conduwuit::{Err, Result, utils::response::LimitReadExt};
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
@@ -30,15 +30,12 @@ pub(super) async fn incoming_federation(&self) -> Result {
.federation_handletime
.read();
let mut msg = format!(
"Handling {} incoming PDUs across {} active transactions:\n",
map.len(),
self.services.transactions.txn_active_handle_count()
);
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
}
msg
};
@@ -55,15 +52,7 @@ pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName
.send()
.await?;
let text = response
.limit_read_text(
self.services
.config
.max_request_size
.try_into()
.expect("u64 fits into usize"),
)
.await?;
let text = response.text().await?;
if text.is_empty() {
return Err!("Response text/body is empty.");

View File

@@ -29,9 +29,7 @@ pub(super) async fn delete(
.delete(&mxc.as_str().try_into()?)
.await?;
return self
.write_str("Deleted the MXC from our database and on our filesystem.")
.await;
return Err!("Deleted the MXC from our database and on our filesystem.",);
}
if let Some(event_id) = event_id {
@@ -390,19 +388,3 @@ pub(super) async fn get_remote_thumbnail(
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
.await
}
#[admin_command]
pub(super) async fn delete_url_preview(&self, url: Option<String>, all: bool) -> Result {
if all {
self.services.media.clear_url_previews().await;
return self.write_str("Deleted all cached URL previews.").await;
}
let url = url.expect("clap enforces url is required unless --all");
self.services.media.remove_url_preview(&url).await?;
self.write_str(&format!("Deleted cached URL preview for: {url}"))
.await
}

View File

@@ -108,16 +108,4 @@ pub enum MediaCommand {
#[arg(long, default_value("800"))]
height: u32,
},
/// Deletes a cached URL preview, forcing it to be re-fetched.
/// Use --all to purge all cached URL previews.
DeleteUrlPreview {
/// The URL to clear from the saved preview data
#[arg(required_unless_present = "all")]
url: Option<String>,
/// Purge all cached URL previews
#[arg(long, conflicts_with = "url")]
all: bool,
},
}

View File

@@ -209,7 +209,7 @@ pub(super) async fn compact(
let parallelism = parallelism.unwrap_or(1);
let results = maps
.into_iter()
.try_stream::<conduwuit::Error>()
.try_stream()
.paralleln_and_then(runtime, parallelism, move |map| {
map.compact_blocking(options.clone())?;
Ok(map.name().to_owned())

View File

@@ -20,17 +20,7 @@ pub enum ResolverCommand {
name: Option<String>,
},
/// Flush a given server from the resolver caches or flush them completely
///
/// * Examples:
/// * Flush a specific server:
///
/// `!admin query resolver flush-cache matrix.example.com`
///
/// * Flush all resolver caches completely:
///
/// `!admin query resolver flush-cache --all`
#[command(verbatim_doc_comment)]
/// Flush a specific server from the resolver caches or everything
FlushCache {
name: Option<OwnedServerName>,

View File

@@ -3,7 +3,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Event, Result, debug_info, err, error, info,
Err, Event, Result, debug_info, err, error, info,
matrix::pdu::PduBuilder,
utils::{self, ReadyExt, stream::BroadbandExt},
warn,
@@ -387,7 +387,7 @@ pub(crate) async fn register_route(
)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
},
@@ -401,7 +401,7 @@ pub(crate) async fn register_route(
&uiaainfo,
json,
);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));
@@ -661,7 +661,7 @@ pub(crate) async fn change_password_route(
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
@@ -673,7 +673,7 @@ pub(crate) async fn change_password_route(
.uiaa
.create(sender_user, body.sender_device(), &uiaainfo, json);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));
@@ -791,7 +791,7 @@ pub(crate) async fn deactivate_route(
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
},
@@ -802,7 +802,7 @@ pub(crate) async fn deactivate_route(
.uiaa
.create(sender_user, body.sender_device(), &uiaainfo, json);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));

View File

@@ -9,7 +9,7 @@
},
events::{
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
RoomAccountDataEventType,
GlobalAccountDataEventType, RoomAccountDataEventType,
},
serde::Raw,
};
@@ -126,6 +126,12 @@ async fn set_account_data(
)));
}
if event_type_s == GlobalAccountDataEventType::PushRules.to_cow_str() {
return Err!(Request(BadJson(
"This endpoint cannot be used for setting/configuring push rules."
)));
}
let data: serde_json::Value = serde_json::from_str(data.get())
.map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?;

View File

@@ -1,121 +0,0 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Result, at};
use futures::StreamExt;
use ruma::api::client::dehydrated_device::{
delete_dehydrated_device::unstable as delete_dehydrated_device,
get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
put_dehydrated_device::unstable as put_dehydrated_device,
};
use crate::Ruma;
const MAX_BATCH_EVENTS: usize = 50;
/// # `PUT /_matrix/client/../dehydrated_device`
///
/// Creates or overwrites the user's dehydrated device.
#[tracing::instrument(skip_all, fields(%client))]
pub(crate) async fn put_dehydrated_device_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
body: Ruma<put_dehydrated_device::Request>,
) -> Result<put_dehydrated_device::Response> {
let sender_user = body
.sender_user
.as_deref()
.expect("AccessToken authentication required");
let device_id = body.body.device_id.clone();
services
.users
.set_dehydrated_device(sender_user, body.body)
.await?;
Ok(put_dehydrated_device::Response { device_id })
}
/// # `DELETE /_matrix/client/../dehydrated_device`
///
/// Deletes the user's dehydrated device without replacement.
#[tracing::instrument(skip_all, fields(%client))]
pub(crate) async fn delete_dehydrated_device_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
body: Ruma<delete_dehydrated_device::Request>,
) -> Result<delete_dehydrated_device::Response> {
let sender_user = body.sender_user();
let device_id = services.users.get_dehydrated_device_id(sender_user).await?;
services.users.remove_device(sender_user, &device_id).await;
Ok(delete_dehydrated_device::Response { device_id })
}
/// # `GET /_matrix/client/../dehydrated_device`
///
/// Gets the user's dehydrated device
#[tracing::instrument(skip_all, fields(%client))]
pub(crate) async fn get_dehydrated_device_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
body: Ruma<get_dehydrated_device::Request>,
) -> Result<get_dehydrated_device::Response> {
let sender_user = body.sender_user();
let device = services.users.get_dehydrated_device(sender_user).await?;
Ok(get_dehydrated_device::Response {
device_id: device.device_id,
device_data: device.device_data,
})
}
/// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
///
/// Paginates the events of the dehydrated device.
#[tracing::instrument(skip_all, fields(%client))]
pub(crate) async fn get_dehydrated_events_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
body: Ruma<get_events::Request>,
) -> Result<get_events::Response> {
let sender_user = body.sender_user();
let device_id = &body.body.device_id;
let existing_id = services.users.get_dehydrated_device_id(sender_user).await;
if existing_id.as_ref().is_err()
|| existing_id
.as_ref()
.is_ok_and(|existing_id| existing_id != device_id)
{
return Err!(Request(Forbidden("Not the dehydrated device_id.")));
}
let since: Option<u64> = body
.body
.next_batch
.as_deref()
.map(str::parse)
.transpose()?;
let mut next_batch: Option<u64> = None;
let events = services
.users
.get_to_device_events(sender_user, device_id, since, None)
.take(MAX_BATCH_EVENTS)
.inspect(|&(count, _)| {
next_batch.replace(count);
})
.map(at!(1))
.collect()
.await;
Ok(get_events::Response {
events,
next_batch: next_batch.as_ref().map(ToString::to_string),
})
}

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Error, Result, debug, err, utils};
use conduwuit::{Err, Result, debug, err, utils};
use futures::StreamExt;
use ruma::{
MilliSecondsSinceUnixEpoch, OwnedDeviceId,
@@ -232,7 +232,7 @@ pub(crate) async fn delete_devices_route(
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
},
@@ -243,10 +243,10 @@ pub(crate) async fn delete_devices_route(
.uiaa
.create(sender_user, sender_device, &uiaainfo, json);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
return Err!(BadRequest(ErrorKind::NotJson, "Not json."));
},
},
}

View File

@@ -5,7 +5,7 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Result, debug, debug_warn, err,
Err, Result, debug, debug_warn, err,
result::NotFound,
utils,
utils::{IterStream, stream::WidebandExt},
@@ -215,7 +215,7 @@ pub(crate) async fn upload_signing_keys_route(
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
},
@@ -226,10 +226,10 @@ pub(crate) async fn upload_signing_keys_route(
.uiaa
.create(sender_user, sender_device, &uiaainfo, json);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
return Err!(BadRequest(ErrorKind::NotJson, "Not json."));
},
},
}
@@ -396,12 +396,12 @@ pub(crate) async fn get_key_changes_route(
let from = body
.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?;
.map_err(|_| err!(BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")))?;
let to = body
.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?;
.map_err(|_| err!(BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")))?;
device_list_updates.extend(
services

View File

@@ -3,10 +3,9 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Result, err,
Err, Result, err, error,
utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
};
use conduwuit_core::error;
use conduwuit_service::{
Services,
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
@@ -70,7 +69,7 @@ pub(crate) async fn create_content_route(
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await
{
err!("Failed to save uploaded media: {e}");
error!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}
@@ -145,22 +144,12 @@ pub(crate) async fn get_content_route(
server_name: &body.server_name,
media_id: &body.media_id,
};
let FileMeta {
content,
content_type,
content_disposition,
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
| Ok(meta) => meta,
| Err(conduwuit::Error::Io(e)) => match e.kind() {
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
| std::io::ErrorKind::PermissionDenied => {
error!("Permission denied when trying to read file: {e:?}");
return Err!(Request(Unknown("Unknown error when fetching file.")));
},
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
},
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
};
} = fetch_file(&services, &mxc, user, body.timeout_ms, None).await?;
Ok(get_content::v1::Response {
file: content.expect("entire file contents"),
@@ -196,18 +185,7 @@ pub(crate) async fn get_content_as_filename_route(
content,
content_type,
content_disposition,
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
| Ok(meta) => meta,
| Err(conduwuit::Error::Io(e)) => match e.kind() {
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
| std::io::ErrorKind::PermissionDenied => {
error!("Permission denied when trying to read file: {e:?}");
return Err!(Request(Unknown("Unknown error when fetching file.")));
},
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
},
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
};
} = fetch_file(&services, &mxc, user, body.timeout_ms, Some(&body.filename)).await?;
Ok(get_content_as_filename::v1::Response {
file: content.expect("entire file contents"),

View File

@@ -1,7 +1,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, at, debug_warn,
Err, Result, at, debug_warn,
matrix::{
event::{Event, Matches},
pdu::PduCount,
@@ -322,7 +322,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
if server_ignored {
// the sender's server is ignored, so ignore this event
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::SenderIgnored { sender: None },
"The sender's server is ignored by this server.",
));
@@ -331,7 +331,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
if user_ignored && !services.config.send_messages_from_ignored_users_to_client {
// the recipient of this PDU has the sender ignored, and we're not
// configured to send ignored messages to clients
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::SenderIgnored { sender: Some(event.sender().to_owned()) },
"You have ignored this sender.",
));

View File

@@ -6,7 +6,6 @@
pub(super) mod backup;
pub(super) mod capabilities;
pub(super) mod context;
pub(super) mod dehydrated_device;
pub(super) mod device;
pub(super) mod directory;
pub(super) mod filter;
@@ -50,7 +49,6 @@
pub(super) use backup::*;
pub(super) use capabilities::*;
pub(super) use context::*;
pub(super) use dehydrated_device::*;
pub(super) use device::*;
pub(super) use directory::*;
pub(super) use filter::*;

View File

@@ -1,5 +1,5 @@
use axum::extract::State;
use conduwuit::{Err, Error, Result, err};
use conduwuit::{Err, Result, err};
use conduwuit_service::Services;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue,
@@ -243,27 +243,27 @@ pub(crate) async fn set_pushrule_route(
body.before.as_deref(),
) {
let err = match error {
| InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
| InsertPushRuleError::ServerDefaultRuleId => err!(BadRequest(
ErrorKind::InvalidParam,
"Rule IDs starting with a dot are reserved for server-default rules.",
),
| InsertPushRuleError::InvalidRuleId => Error::BadRequest(
)),
| InsertPushRuleError::InvalidRuleId => err!(BadRequest(
ErrorKind::InvalidParam,
"Rule ID containing invalid characters.",
),
| InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
)),
| InsertPushRuleError::RelativeToServerDefaultRule => err!(BadRequest(
ErrorKind::InvalidParam,
"Can't place a push rule relatively to a server-default rule.",
),
| InsertPushRuleError::UnknownRuleId => Error::BadRequest(
)),
| InsertPushRuleError::UnknownRuleId => err!(BadRequest(
ErrorKind::NotFound,
"The before or after rule could not be found.",
),
| InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest(
)),
| InsertPushRuleError::BeforeHigherThanAfter => err!(BadRequest(
ErrorKind::InvalidParam,
"The before rule has a higher priority than the after rule.",
),
| _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
)),
| _ => err!(BadRequest(ErrorKind::InvalidParam, "Invalid data.")),
};
return Err(err);
@@ -433,13 +433,13 @@ pub(crate) async fn delete_pushrule_route(
.remove(body.kind.clone(), &body.rule_id)
{
let err = match error {
| RemovePushRuleError::ServerDefault => Error::BadRequest(
| RemovePushRuleError::ServerDefault => err!(BadRequest(
ErrorKind::InvalidParam,
"Cannot delete a server-default pushrule.",
),
)),
| RemovePushRuleError::NotFound =>
Error::BadRequest(ErrorKind::NotFound, "Push rule not found."),
| _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
err!(BadRequest(ErrorKind::NotFound, "Push rule not found.")),
| _ => err!(BadRequest(ErrorKind::InvalidParam, "Invalid data.")),
};
return Err(err);

View File

@@ -2,7 +2,7 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Event, Result, RoomVersion, debug, err, info,
Err, Event, Result, RoomVersion, debug, err, info,
matrix::{StateKey, pdu::PduBuilder},
};
use futures::{FutureExt, StreamExt};
@@ -58,7 +58,7 @@ pub(crate) async fn upgrade_room_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !services.server.supported_room_version(&body.new_version) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.",
));
@@ -170,7 +170,7 @@ pub(crate) async fn upgrade_room_route(
"creator".into(),
json!(&sender_user).try_into().map_err(|e| {
info!("Error forming creation event: {e}");
Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")
err!(BadRequest(ErrorKind::BadJson, "Error forming creation event"))
})?,
);
},
@@ -186,13 +186,13 @@ pub(crate) async fn upgrade_room_route(
"room_version".into(),
json!(&body.new_version)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
.map_err(|_| err!(BadRequest(ErrorKind::BadJson, "Error forming creation event")))?,
);
create_event_content.insert(
"predecessor".into(),
json!(predecessor)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
.map_err(|_| err!(BadRequest(ErrorKind::BadJson, "Error forming creation event")))?,
);
// Validate creation event content
@@ -203,7 +203,7 @@ pub(crate) async fn upgrade_room_route(
)
.is_err()
{
return Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"));
return Err!(BadRequest(ErrorKind::BadJson, "Error forming creation event"));
}
let create_event_id = services

View File

@@ -50,8 +50,8 @@ pub(crate) async fn send_message_event_route(
// Check if this is a new transaction id
if let Ok(response) = services
.transactions
.get_client_txn(sender_user, sender_device, &body.txn_id)
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)
.await
{
// The client might have sent a txnid of the /sendToDevice endpoint
@@ -92,7 +92,7 @@ pub(crate) async fn send_message_event_route(
)
.await?;
services.transactions.add_client_txnid(
services.transaction_ids.add_txnid(
sender_user,
sender_device,
&body.txn_id,

View File

@@ -3,7 +3,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug, err, info,
Err, Result, debug, err, info,
utils::{self, ReadyExt, hash},
warn,
};
@@ -191,7 +191,7 @@ pub(crate) async fn handle_login(
}
if services.users.is_locked(&user_id).await? {
return Err(Error::BadRequest(ErrorKind::UserLocked, "This account has been locked."));
return Err!(BadRequest(ErrorKind::UserLocked, "This account has been locked."));
}
if services.users.is_login_disabled(&user_id).await {
@@ -390,7 +390,7 @@ pub(crate) async fn login_token_route(
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
@@ -402,7 +402,7 @@ pub(crate) async fn login_token_route(
.uiaa
.create(sender_user, sender_device, &uiaainfo, json);
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
},
| _ => {
return Err!(Request(NotJson("No JSON body was sent when required.")));

View File

@@ -11,7 +11,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Result, at, extract_variant,
Result, extract_variant,
utils::{
ReadyExt, TryFutureExtExt,
stream::{BroadbandExt, Tools, WidebandExt},
@@ -385,7 +385,6 @@ pub(crate) async fn build_sync_events(
last_sync_end_count,
Some(current_count),
)
.map(at!(1))
.collect::<Vec<_>>();
let device_one_time_keys_count = services

View File

@@ -336,9 +336,7 @@ async fn handle_lists<'a, Rooms, AllRooms>(
let ranges = list.ranges.clone();
for mut range in ranges {
range.0 = range
.0
.min(UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));
range.0 = uint!(0);
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
range.1 = range
.1
@@ -1029,7 +1027,6 @@ async fn collect_to_device(
events: services
.users
.get_to_device_events(sender_user, sender_device, None, Some(next_batch))
.map(at!(1))
.collect()
.await,
})

View File

@@ -1,7 +1,7 @@
use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{Error, Result};
use conduwuit::{Result, err};
use conduwuit_service::sending::EduBuf;
use futures::StreamExt;
use ruma::{
@@ -26,8 +26,8 @@ pub(crate) async fn send_event_to_device_route(
// Check if this is a new transaction id
if services
.transactions
.get_client_txn(sender_user, sender_device, &body.txn_id)
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)
.await
.is_ok()
{
@@ -66,7 +66,7 @@ pub(crate) async fn send_event_to_device_route(
let event = event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?;
.map_err(|_| err!(BadRequest(ErrorKind::InvalidParam, "Event is invalid")))?;
match target_device_id_maybe {
| DeviceIdOrAllDevices::DeviceId(target_device_id) => {
@@ -104,8 +104,8 @@ pub(crate) async fn send_event_to_device_route(
// Save transaction id with empty data
services
.transactions
.add_client_txnid(sender_user, sender_device, &body.txn_id, &[]);
.transaction_ids
.add_txnid(sender_user, sender_device, &body.txn_id, &[]);
Ok(send_event_to_device::v3::Response {})
}

View File

@@ -50,7 +50,6 @@ pub(crate) async fn get_supported_versions_route(
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
("org.matrix.msc3814".to_owned(), true), /* dehydrated devices */
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */

View File

@@ -1,11 +1,8 @@
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{Error, Result};
use ruma::api::client::{
discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
discover_support::{self, Contact},
},
error::ErrorKind,
use conduwuit::{Err, Result};
use ruma::api::client::discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
discover_support::{self, Contact},
};
use crate::Ruma;
@@ -19,7 +16,7 @@ pub(crate) async fn well_known_client(
) -> Result<discover_homeserver::Response> {
let client_url = match services.config.well_known.client.as_ref() {
| Some(url) => url.to_string(),
| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
| None => return Err!(BadRequest(ErrorKind::NotFound, "Not found.")),
};
Ok(discover_homeserver::Response {
@@ -27,32 +24,10 @@ pub(crate) async fn well_known_client(
identity_server: None,
sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
tile_server: None,
rtc_foci: services
.config
.matrix_rtc
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
.to_vec(),
rtc_foci: services.config.well_known.rtc_focus_server_urls.clone(),
})
}
/// # `GET /_matrix/client/v1/rtc/transports`
/// # `GET /_matrix/client/unstable/org.matrix.msc4143/rtc/transports`
///
/// Returns the list of MatrixRTC foci (transports) configured for this
/// homeserver, implementing MSC4143.
pub(crate) async fn get_rtc_transports(
State(services): State<crate::State>,
_body: Ruma<ruma::api::client::discovery::get_rtc_transports::Request>,
) -> Result<ruma::api::client::discovery::get_rtc_transports::Response> {
Ok(ruma::api::client::discovery::get_rtc_transports::Response::new(
services
.config
.matrix_rtc
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
.to_vec(),
))
}
/// # `GET /.well-known/matrix/support`
///
/// Server support contact and support page of a homeserver's domain.
@@ -110,7 +85,7 @@ pub(crate) async fn well_known_support(
if contacts.is_empty() && support_page.is_none() {
// No admin room, no configured contacts, and no support page
return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
return Err!(BadRequest(ErrorKind::NotFound, "Not found."));
}
Ok(discover_support::Response { contacts, support_page })
@@ -127,7 +102,7 @@ pub(crate) async fn syncv3_client_server_json(
| Some(url) => url.to_string(),
| None => match services.config.well_known.server.as_ref() {
| Some(url) => url.to_string(),
| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
| None => return Err!(BadRequest(ErrorKind::NotFound, "Not found.")),
},
};

View File

@@ -160,10 +160,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&client::update_device_route)
.ruma_route(&client::delete_device_route)
.ruma_route(&client::delete_devices_route)
.ruma_route(&client::put_dehydrated_device_route)
.ruma_route(&client::delete_dehydrated_device_route)
.ruma_route(&client::get_dehydrated_device_route)
.ruma_route(&client::get_dehydrated_events_route)
.ruma_route(&client::get_tags_route)
.ruma_route(&client::update_tag_route)
.ruma_route(&client::delete_tag_route)
@@ -188,7 +184,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&client::put_suspended_status)
.ruma_route(&client::well_known_support)
.ruma_route(&client::well_known_client)
.ruma_route(&client::get_rtc_transports)
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
.route("/_continuwuity/server_version", get(client::conduwuit_server_version))
.ruma_route(&client::room_initial_sync_route)

View File

@@ -4,7 +4,7 @@
headers::{Authorization, authorization::Bearer},
typed_header::TypedHeaderRejectionReason,
};
use conduwuit::{Err, Error, Result, debug_error, err, warn};
use conduwuit::{Err, Result, debug_error, err, warn};
use futures::{
TryFutureExt,
future::{
@@ -14,8 +14,7 @@
pin_mut,
};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName,
OwnedUserId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
api::{
AuthScheme, IncomingRequest, Metadata,
client::{
@@ -67,17 +66,23 @@ pub(super) async fn auth(
if metadata.authentication == AuthScheme::None {
match metadata {
| &get_public_rooms::v3::Request::METADATA => {
match token {
| Token::Appservice(_) | Token::User(_) => {
// we should have validated the token above
// already
},
| Token::None | Token::Invalid => {
return Err(Error::BadRequest(
ErrorKind::MissingToken,
"Missing or invalid access token.",
));
},
if !services
.server
.config
.allow_public_room_directory_without_auth
{
match token {
| Token::Appservice(_) | Token::User(_) => {
// we should have validated the token above
// already
},
| Token::None | Token::Invalid => {
return Err!(BadRequest(
ErrorKind::MissingToken,
"Missing or invalid access token.",
));
},
}
}
},
| &get_profile::v3::Request::METADATA
@@ -91,7 +96,7 @@ pub(super) async fn auth(
// already
},
| Token::None | Token::Invalid => {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::MissingToken,
"Missing or invalid access token.",
));
@@ -125,10 +130,10 @@ pub(super) async fn auth(
appservice_info: None,
})
} else {
Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))
Err!(BadRequest(ErrorKind::MissingToken, "Missing access token."))
}
},
| _ => Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")),
| _ => Err!(BadRequest(ErrorKind::MissingToken, "Missing access token.")),
},
| (
AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
@@ -144,7 +149,7 @@ pub(super) async fn auth(
&ruma::api::client::session::logout::v3::Request::METADATA
| &ruma::api::client::session::logout_all::v3::Request::METADATA
) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::UserLocked,
"This account has been locked.",
));
@@ -169,11 +174,11 @@ pub(super) async fn auth(
appservice_info: None,
}),
| (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) =>
Err(Error::BadRequest(
Err!(BadRequest(
ErrorKind::Unauthorized,
"Only server signatures should be used on this endpoint.",
)),
| (AuthScheme::AppserviceToken, Token::User(_)) => Err(Error::BadRequest(
| (AuthScheme::AppserviceToken, Token::User(_)) => Err!(BadRequest(
ErrorKind::Unauthorized,
"Only appservice access tokens should be used on this endpoint.",
)),
@@ -191,13 +196,13 @@ pub(super) async fn auth(
appservice_info: None,
})
} else {
Err(Error::BadRequest(
Err!(BadRequest(
ErrorKind::UnknownToken { soft_logout: false },
"Unknown access token.",
))
}
},
| (_, Token::Invalid) => Err(Error::BadRequest(
| (_, Token::Invalid) => Err!(BadRequest(
ErrorKind::UnknownToken { soft_logout: false },
"Unknown access token.",
)),
@@ -229,33 +234,10 @@ async fn auth_appservice(
return Err!(Request(Exclusive("User is not in namespace.")));
}
// MSC3202/MSC4190: Handle device_id masquerading for appservices.
// The device_id can be provided via `device_id` or
// `org.matrix.msc3202.device_id` query parameter.
let sender_device = if let Some(ref device_id_str) = request.query.device_id {
let device_id: &DeviceId = device_id_str.as_str().into();
// Verify the device exists for this user
if services
.users
.get_device_metadata(&user_id, device_id)
.await
.is_err()
{
return Err!(Request(Forbidden(
"Device does not exist for user or appservice cannot masquerade as this device."
)));
}
Some(device_id.to_owned())
} else {
None
};
Ok(Auth {
origin: None,
sender_user: Some(user_id),
sender_device,
sender_device: None,
appservice_info: Some(*info),
})
}

View File

@@ -11,10 +11,6 @@
pub(super) struct QueryParams {
pub(super) access_token: Option<String>,
pub(super) user_id: Option<String>,
/// Device ID for appservice device masquerading (MSC3202/MSC4190).
/// Can be provided as `device_id` or `org.matrix.msc3202.device_id`.
#[serde(alias = "org.matrix.msc3202.device_id")]
pub(super) device_id: Option<String>,
}
pub(super) struct Request {

View File

@@ -1,12 +1,9 @@
use std::{borrow::Borrow, iter::once};
use axum::extract::State;
use conduwuit::{Err, Error, Result, info, utils::stream::ReadyExt};
use conduwuit::{Err, Error, Result, err, info, utils::stream::ReadyExt};
use futures::StreamExt;
use ruma::{
RoomId,
api::{client::error::ErrorKind, federation::authorization::get_event_authorization},
};
use ruma::{RoomId, api::federation::authorization::get_event_authorization};
use super::AccessCheck;
use crate::Ruma;
@@ -47,7 +44,7 @@ pub(crate) async fn get_event_authorization_route(
.timeline
.get_pdu_json(&body.event_id)
.await
.map_err(|_| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
.map_err(|_| err!(BadRequest(ErrorKind::NotFound, "Event not found.")))?;
let room_id_str = event
.get("room_id")

View File

@@ -2,7 +2,7 @@
use axum_client_ip::InsecureClientIp;
use base64::{Engine as _, engine::general_purpose};
use conduwuit::{
Err, Error, PduEvent, Result, err, error,
Err, PduEvent, Result, err, error,
matrix::{Event, event::gen_event_id},
utils::{self, hash::sha256},
warn,
@@ -33,7 +33,7 @@ pub(crate) async fn create_invite_route(
.await?;
if !services.server.supported_room_version(&body.room_version) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone() },
"Server does not support this room version.",
));

View File

@@ -1,7 +1,7 @@
use std::borrow::ToOwned;
use axum::extract::State;
use conduwuit::{Err, Error, Result, debug, debug_info, info, matrix::pdu::PduBuilder, warn};
use conduwuit::{Err, Result, debug, debug_info, info, matrix::pdu::PduBuilder, warn};
use conduwuit_service::Services;
use futures::StreamExt;
use ruma::{
@@ -80,7 +80,7 @@ pub(crate) async fn create_join_event_template_route(
let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?;
if !body.ver.contains(&room_version_id) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::IncompatibleRoomVersion { room_version: room_version_id },
"Room version not supported.",
));

View File

@@ -1,6 +1,6 @@
use RoomVersionId::*;
use axum::extract::State;
use conduwuit::{Err, Error, Result, debug_warn, info, matrix::pdu::PduBuilder, warn};
use conduwuit::{Err, Result, debug_warn, info, matrix::pdu::PduBuilder, warn};
use ruma::{
RoomVersionId,
api::{client::error::ErrorKind, federation::knock::create_knock_event_template},
@@ -67,14 +67,14 @@ pub(crate) async fn create_knock_event_template_route(
let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?;
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::IncompatibleRoomVersion { room_version: room_version_id },
"Room version does not support knocking.",
));
}
if !body.ver.contains(&room_version_id) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::IncompatibleRoomVersion { room_version: room_version_id },
"Your homeserver does not support the features required to knock on this room.",
));

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Error, Result};
use conduwuit::{Err, Result, err};
use ruma::{
api::{
client::error::ErrorKind,
@@ -25,7 +25,7 @@ pub(crate) async fn get_public_rooms_filtered_route(
.config
.allow_public_room_directory_over_federation
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public"));
return Err!(BadRequest(ErrorKind::forbidden(), "Room directory is not public"));
}
let response = crate::client::get_public_rooms_filtered_helper(
@@ -38,7 +38,10 @@ pub(crate) async fn get_public_rooms_filtered_route(
)
.await
.map_err(|_| {
Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.")
err!(BadRequest(
ErrorKind::Unknown,
"Failed to return this server's public room list."
))
})?;
Ok(get_public_rooms_filtered::v1::Response {
@@ -62,7 +65,7 @@ pub(crate) async fn get_public_rooms_route(
.globals
.allow_public_room_directory_over_federation()
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public"));
return Err!(BadRequest(ErrorKind::forbidden(), "Room directory is not public"));
}
let response = crate::client::get_public_rooms_filtered_helper(
@@ -75,7 +78,10 @@ pub(crate) async fn get_public_rooms_route(
)
.await
.map_err(|_| {
Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.")
err!(BadRequest(
ErrorKind::Unknown,
"Failed to return this server's public room list."
))
})?;
Ok(get_public_rooms::v1::Response {

View File

@@ -1,7 +1,7 @@
use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{Error, Result, err};
use conduwuit::{Err, Result, err};
use futures::StreamExt;
use get_profile_information::v1::ProfileField;
use rand::seq::SliceRandom;
@@ -67,17 +67,16 @@ pub(crate) async fn get_profile_information_route(
.config
.allow_inbound_profile_lookup_federation_requests
{
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::forbidden(),
"Profile lookup over federation is not allowed on this homeserver.",
));
}
if !services.globals.server_is_ours(body.user_id.server_name()) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"User does not belong to this server.",
));
return Err!(
BadRequest(ErrorKind::InvalidParam, "User does not belong to this server.",)
);
}
let mut displayname = None;

View File

@@ -1,33 +1,27 @@
use std::{
collections::{BTreeMap, HashMap, HashSet},
net::IpAddr,
time::{Duration, Instant},
};
use std::{collections::BTreeMap, net::IpAddr, time::Instant};
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug, debug_warn, err, error,
result::LogErr,
state_res::lexicographical_topological_sort,
trace,
utils::{
IterStream, ReadyExt, millis_since_unix_epoch,
stream::{BroadbandExt, TryBroadbandExt, automatic_width},
},
warn,
};
use conduwuit_service::{
Services,
sending::{EDU_LIMIT, PDU_LIMIT},
};
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
use http::StatusCode;
use itertools::Itertools;
use ruma::{
CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId,
RoomId, ServerName, UserId,
CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId,
api::{
client::error::{ErrorKind, ErrorKind::LimitExceeded},
client::error::ErrorKind,
federation::transactions::{
edu::{
DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent,
@@ -38,16 +32,9 @@
},
},
events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType},
int,
serde::Raw,
to_device::DeviceIdOrAllDevices,
uint,
};
use service::transactions::{
FederationTxnState, TransactionError, TxnKey, WrappedTransactionResponse,
};
use tokio::sync::watch::{Receiver, Sender};
use tracing::instrument;
use crate::Ruma;
@@ -57,6 +44,15 @@
/// # `PUT /_matrix/federation/v1/send/{txnId}`
///
/// Push EDUs and PDUs to this server.
#[tracing::instrument(
name = "txn",
level = "debug",
skip_all,
fields(
%client,
origin = body.origin().as_str()
),
)]
pub(crate) async fn send_transaction_message_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -80,73 +76,16 @@ pub(crate) async fn send_transaction_message_route(
)));
}
let txn_key = (body.origin().to_owned(), body.transaction_id.clone());
// Atomically check cache, join active, or start new transaction
match services
.transactions
.get_or_start_federation_txn(txn_key.clone())?
{
| FederationTxnState::Cached(response) => {
// Already responded
Ok(response)
},
| FederationTxnState::Active(receiver) => {
// Another thread is processing
wait_for_result(receiver).await
},
| FederationTxnState::Started { receiver, sender } => {
// We're the first, spawn the processing task
services
.server
.runtime()
.spawn(process_inbound_transaction(services, body, client, txn_key, sender));
// and wait for it
wait_for_result(receiver).await
},
}
}
async fn wait_for_result(
mut recv: Receiver<WrappedTransactionResponse>,
) -> Result<send_transaction_message::v1::Response> {
if tokio::time::timeout(Duration::from_secs(50), recv.changed())
.await
.is_err()
{
// Took too long, return 429 to encourage the sender to try again
return Err(Error::BadRequest(
LimitExceeded { retry_after: None },
"Transaction is being still being processed. Please try again later.",
));
}
let value = recv.borrow_and_update();
match value.clone() {
| Some(Ok(response)) => Ok(response),
| Some(Err(err)) => Err(transaction_error_to_response(&err)),
| None => Err(Error::Request(
ErrorKind::Unknown,
"Transaction processing failed unexpectedly".into(),
StatusCode::INTERNAL_SERVER_ERROR,
)),
}
}
#[instrument(
skip_all,
fields(
id = ?body.transaction_id.as_str(),
origin = ?body.origin()
)
)]
async fn process_inbound_transaction(
services: crate::State,
body: Ruma<send_transaction_message::v1::Request>,
client: IpAddr,
txn_key: TxnKey,
sender: Sender<WrappedTransactionResponse>,
) {
let txn_start_time = Instant::now();
trace!(
pdus = body.pdus.len(),
edus = body.edus.len(),
elapsed = ?txn_start_time.elapsed(),
id = %body.transaction_id,
origin = %body.origin(),
"Starting txn",
);
let pdus = body
.pdus
.iter()
@@ -163,79 +102,40 @@ async fn process_inbound_transaction(
.filter_map(Result::ok)
.stream();
debug!(pdus = body.pdus.len(), edus = body.edus.len(), "Processing transaction",);
let results = match handle(&services, &client, body.origin(), pdus, edus).await {
| Ok(results) => results,
| Err(err) => {
fail_federation_txn(services, &txn_key, &sender, err);
return;
},
};
for (id, result) in &results {
if let Err(e) = result {
if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) {
debug_warn!("Incoming PDU failed {id}: {e:?}");
}
}
}
let results = handle(&services, &client, body.origin(), txn_start_time, pdus, edus).await?;
debug!(
pdus = body.pdus.len(),
edus = body.edus.len(),
elapsed = ?txn_start_time.elapsed(),
"Finished processing transaction"
id = %body.transaction_id,
origin = %body.origin(),
"Finished txn",
);
for (id, result) in &results {
if let Err(e) = result {
if matches!(e, Error::BadRequest { kind: ErrorKind::NotFound, .. }) {
warn!("Incoming PDU failed {id}: {e:?}");
}
}
}
let response = send_transaction_message::v1::Response {
Ok(send_transaction_message::v1::Response {
pdus: results
.into_iter()
.map(|(e, r)| (e, r.map_err(error::sanitized_message)))
.collect(),
};
services
.transactions
.finish_federation_txn(txn_key, sender, response);
})
}
/// Handles a failed federation transaction by sending the error through
/// the channel and cleaning up the transaction state. This allows waiters to
/// receive an appropriate error response.
fn fail_federation_txn(
services: crate::State,
txn_key: &TxnKey,
sender: &Sender<WrappedTransactionResponse>,
err: TransactionError,
) {
debug!("Transaction failed: {err}");
// Remove from active state so the transaction can be retried
services.transactions.remove_federation_txn(txn_key);
// Send the error to any waiters
if let Err(e) = sender.send(Some(Err(err))) {
debug_warn!("Failed to send transaction error to receivers: {e}");
}
}
/// Converts a TransactionError into an appropriate HTTP error response.
fn transaction_error_to_response(err: &TransactionError) -> Error {
match err {
| TransactionError::ShuttingDown => Error::Request(
ErrorKind::Unknown,
"Server is shutting down, please retry later".into(),
StatusCode::SERVICE_UNAVAILABLE,
),
}
}
async fn handle(
services: &Services,
client: &IpAddr,
origin: &ServerName,
started: Instant,
pdus: impl Stream<Item = Pdu> + Send,
edus: impl Stream<Item = Edu> + Send,
) -> std::result::Result<ResolvedMap, TransactionError> {
) -> Result<ResolvedMap> {
// group pdus by room
let pdus = pdus
.collect()
@@ -252,7 +152,7 @@ async fn handle(
.into_iter()
.try_stream()
.broad_and_then(|(room_id, pdus): (_, Vec<_>)| {
handle_room(services, client, origin, room_id, pdus.into_iter())
handle_room(services, client, origin, started, room_id, pdus.into_iter())
.map_ok(Vec::into_iter)
.map_ok(IterStream::try_stream)
})
@@ -269,51 +169,14 @@ async fn handle(
Ok(results)
}
/// Attempts to build a localised directed acyclic graph out of the given PDUs,
/// returning them in a topologically sorted order.
///
/// This is used to attempt to process PDUs in an order that respects their
/// dependencies, however it is ultimately the sender's responsibility to send
/// them in a processable order, so this is just a best effort attempt. It does
/// not account for power levels or other tie breaks.
async fn build_local_dag(
pdu_map: &HashMap<OwnedEventId, CanonicalJsonObject>,
) -> Result<Vec<OwnedEventId>> {
debug_assert!(pdu_map.len() >= 2, "needless call to build_local_dag with less than 2 PDUs");
let mut dag: HashMap<OwnedEventId, HashSet<OwnedEventId>> = HashMap::new();
for (event_id, value) in pdu_map {
let prev_events = value
.get("prev_events")
.expect("pdu must have prev_events")
.as_array()
.expect("prev_events must be an array")
.iter()
.map(|v| {
OwnedEventId::parse(v.as_str().expect("prev_events values must be strings"))
.expect("prev_events must be valid event IDs")
})
.collect::<HashSet<OwnedEventId>>();
dag.insert(event_id.clone(), prev_events);
}
lexicographical_topological_sort(&dag, &|_| async {
// Note: we don't bother fetching power levels because that would massively slow
// this function down. This is a best-effort attempt to order events correctly
// for processing, however ultimately that should be the sender's job.
Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0))))
})
.await
.map_err(|e| err!("failed to resolve local graph: {e}"))
}
async fn handle_room(
services: &Services,
_client: &IpAddr,
origin: &ServerName,
txn_start_time: Instant,
room_id: OwnedRoomId,
pdus: impl Iterator<Item = Pdu> + Send,
) -> std::result::Result<Vec<(OwnedEventId, Result)>, TransactionError> {
) -> Result<Vec<(OwnedEventId, Result)>> {
let _room_lock = services
.rooms
.event_handler
@@ -322,40 +185,27 @@ async fn handle_room(
.await;
let room_id = &room_id;
let pdu_map: HashMap<OwnedEventId, CanonicalJsonObject> = pdus
.into_iter()
.map(|(_, event_id, value)| (event_id, value))
.collect();
// Try to sort PDUs by their dependencies, but fall back to arbitrary order on
// failure (e.g., cycles). This is best-effort; proper ordering is the sender's
// responsibility.
let sorted_event_ids = if pdu_map.len() >= 2 {
build_local_dag(&pdu_map).await.unwrap_or_else(|e| {
debug_warn!("Failed to build local DAG for room {room_id}: {e}");
pdu_map.keys().cloned().collect()
pdus.try_stream()
.and_then(|(_, event_id, value)| async move {
services.server.check_running()?;
let pdu_start_time = Instant::now();
let result = services
.rooms
.event_handler
.handle_incoming_pdu(origin, room_id, &event_id, value, true)
.await
.map(|_| ());
debug!(
pdu_elapsed = ?pdu_start_time.elapsed(),
txn_elapsed = ?txn_start_time.elapsed(),
"Finished PDU {event_id}",
);
Ok((event_id, result))
})
} else {
pdu_map.keys().cloned().collect()
};
let mut results = Vec::with_capacity(sorted_event_ids.len());
for event_id in sorted_event_ids {
let value = pdu_map
.get(&event_id)
.expect("sorted event IDs must be from the original map")
.clone();
services
.server
.check_running()
.map_err(|_| TransactionError::ShuttingDown)?;
let result = services
.rooms
.event_handler
.handle_incoming_pdu(origin, room_id, &event_id, value, true)
.await
.map(|_| ());
results.push((event_id, result));
}
Ok(results)
.try_collect()
.await
}
async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) {
@@ -628,8 +478,8 @@ async fn handle_edu_direct_to_device(
// Check if this is a new transaction id
if services
.transactions
.get_client_txn(sender, None, message_id)
.transaction_ids
.existing_txnid(sender, None, message_id)
.await
.is_ok()
{
@@ -648,8 +498,8 @@ async fn handle_edu_direct_to_device(
// Save transaction id with empty data
services
.transactions
.add_client_txnid(sender, None, message_id, &[]);
.transaction_ids
.add_txnid(sender, None, message_id, &[]);
}
async fn handle_edu_direct_to_device_user<Event: Send + Sync>(

View File

@@ -1,7 +1,7 @@
use std::time::Duration;
use axum::extract::State;
use conduwuit::{Error, Result};
use conduwuit::{Err, Result};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::api::{
client::error::ErrorKind,
@@ -24,7 +24,7 @@ pub(crate) async fn get_devices_route(
body: Ruma<get_devices::v1::Request>,
) -> Result<get_devices::v1::Response> {
if !services.globals.user_is_local(&body.user_id) {
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
@@ -86,10 +86,9 @@ pub(crate) async fn get_keys_route(
.iter()
.any(|(u, _)| !services.globals.user_is_local(u))
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"User does not belong to this server.",
));
return Err!(
BadRequest(ErrorKind::InvalidParam, "User does not belong to this server.",)
);
}
let result = get_keys_helper(
@@ -121,7 +120,7 @@ pub(crate) async fn claim_keys_route(
.iter()
.any(|(u, _)| !services.globals.user_is_local(u))
{
return Err(Error::BadRequest(
return Err!(BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{Error, Result};
use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver};
use conduwuit::{Err, Result};
use ruma::api::federation::discovery::discover_homeserver;
use crate::Ruma;
@@ -14,7 +14,7 @@ pub(crate) async fn well_known_server(
Ok(discover_homeserver::Response {
server: match services.server.config.well_known.server.as_ref() {
| Some(server_name) => server_name.to_owned(),
| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
| None => return Err!(BadRequest(ErrorKind::NotFound, "Not found.")),
},
})
}

View File

@@ -98,7 +98,8 @@ serde-saphyr.workspace = true
serde.workspace = true
smallvec.workspace = true
smallstr.workspace = true
thiserror.workspace = true
snafu.workspace = true
paste.workspace = true
tikv-jemallocator.optional = true
tikv-jemallocator.workspace = true
tikv-jemalloc-ctl.optional = true

View File

@@ -368,31 +368,6 @@ pub struct Config {
#[serde(default = "default_max_fetch_prev_events")]
pub max_fetch_prev_events: u16,
/// How many incoming federation transactions the server is willing to be
/// processing at any given time before it becomes overloaded and starts
/// rejecting further transactions until some slots become available.
///
/// Setting this value too low or too high may result in unstable
/// federation, and setting it too high may cause runaway resource usage.
///
/// default: 150
#[serde(default = "default_max_concurrent_inbound_transactions")]
pub max_concurrent_inbound_transactions: usize,
/// Maximum age (in seconds) for cached federation transaction responses.
/// Entries older than this will be removed during cleanup.
///
/// default: 7200 (2 hours)
#[serde(default = "default_transaction_id_cache_max_age_secs")]
pub transaction_id_cache_max_age_secs: u64,
/// Maximum number of cached federation transaction responses.
/// When the cache exceeds this limit, older entries will be removed.
///
/// default: 8192
#[serde(default = "default_transaction_id_cache_max_entries")]
pub transaction_id_cache_max_entries: usize,
/// Default/base connection timeout (seconds). This is used only by URL
/// previews and update/news endpoint checks.
///
@@ -678,6 +653,12 @@ pub struct Config {
#[serde(default)]
pub allow_public_room_directory_over_federation: bool,
/// Set this to true to allow your server's public room directory to be
/// queried without client authentication (access token) through the Client
/// APIs. Set this to false to protect against /publicRooms spiders.
#[serde(default)]
pub allow_public_room_directory_without_auth: bool,
/// Allow guests/unauthenticated users to access TURN credentials.
///
/// This is the equivalent of Synapse's `turn_allow_guests` config option.
@@ -2080,12 +2061,6 @@ pub struct Config {
/// display: nested
#[serde(default)]
pub blurhashing: BlurhashConfig,
/// Configuration for MatrixRTC (MSC4143) transport discovery.
/// display: nested
#[serde(default)]
pub matrix_rtc: MatrixRtcConfig,
#[serde(flatten)]
#[allow(clippy::zero_sized_map_values)]
// this is a catchall, the map shouldn't be zero at runtime
@@ -2151,16 +2126,17 @@ pub struct WellKnownConfig {
/// listed.
pub support_mxid: Option<OwnedUserId>,
/// **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
///
/// A list of MatrixRTC foci URLs which will be served as part of the
/// MSC4143 client endpoint at /.well-known/matrix/client.
/// MSC4143 client endpoint at /.well-known/matrix/client. If you're
/// setting up livekit, you'd want something like:
/// rtc_focus_server_urls = [
/// { type = "livekit", livekit_service_url = "https://livekit.example.com" },
/// ]
///
/// This option is deprecated and will be removed in a future release.
/// Please migrate to the new `[global.matrix_rtc]` config section.
/// To disable, set this to be an empty vector (`[]`).
///
/// default: []
#[serde(default)]
#[serde(default = "default_rtc_focus_urls")]
pub rtc_focus_server_urls: Vec<RtcFocusInfo>,
}
@@ -2189,43 +2165,6 @@ pub struct BlurhashConfig {
pub blurhash_max_raw_size: u64,
}
#[derive(Clone, Debug, Deserialize, Default)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.matrix_rtc")]
pub struct MatrixRtcConfig {
/// A list of MatrixRTC foci (transports) which will be served via the
/// MSC4143 RTC transports endpoint at
/// `/_matrix/client/v1/rtc/transports`. If you're setting up livekit,
/// you'd want something like:
/// ```toml
/// [global.matrix_rtc]
/// foci = [
/// { type = "livekit", livekit_service_url = "https://livekit.example.com" },
/// ]
/// ```
///
/// To disable, set this to an empty list (`[]`).
///
/// default: []
#[serde(default)]
pub foci: Vec<RtcFocusInfo>,
}
impl MatrixRtcConfig {
/// Returns the effective foci, falling back to the deprecated
/// `rtc_focus_server_urls` if the new config is empty.
#[must_use]
pub fn effective_foci<'a>(
&'a self,
deprecated_foci: &'a [RtcFocusInfo],
) -> &'a [RtcFocusInfo] {
if !self.foci.is_empty() {
&self.foci
} else {
deprecated_foci
}
}
}
#[derive(Clone, Debug, Default, Deserialize)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
pub struct LdapConfig {
@@ -2419,7 +2358,6 @@ pub struct DraupnirConfig {
"well_known_support_email",
"well_known_support_mxid",
"registration_token_file",
"well_known.rtc_focus_server_urls",
];
impl Config {
@@ -2602,12 +2540,6 @@ fn default_pusher_idle_timeout() -> u64 { 15 }
fn default_max_fetch_prev_events() -> u16 { 192_u16 }
fn default_max_concurrent_inbound_transactions() -> usize { 150 }
fn default_transaction_id_cache_max_age_secs() -> u64 { 60 * 60 * 2 }
fn default_transaction_id_cache_max_entries() -> usize { 8192 }
fn default_tracing_flame_filter() -> String {
cfg!(debug_assertions)
.then_some("trace,h2=off")
@@ -2703,6 +2635,9 @@ fn default_rocksdb_stats_level() -> u8 { 1 }
#[inline]
pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 }
#[must_use]
pub fn default_rtc_focus_urls() -> Vec<RtcFocusInfo> { vec![] }
fn default_ip_range_denylist() -> Vec<String> {
vec![
"127.0.0.0/8".to_owned(),

View File

@@ -45,63 +45,162 @@ macro_rules! Err {
macro_rules! err {
(Request(Forbidden($level:ident!($($args:tt)+)))) => {{
let mut buf = String::new();
$crate::error::Error::Request(
$crate::ruma::api::client::error::ErrorKind::forbidden(),
$crate::err_log!(buf, $level, $($args)+),
$crate::http::StatusCode::BAD_REQUEST
)
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::forbidden(),
message: $crate::err_log!(buf, $level, $($args)+),
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: Some($crate::snafu::Backtrace::capture()),
}
}};
(Request(Forbidden($($args:tt)+))) => {
$crate::error::Error::Request(
$crate::ruma::api::client::error::ErrorKind::forbidden(),
$crate::format_maybe!($($args)+),
$crate::http::StatusCode::BAD_REQUEST
)
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::forbidden(),
message,
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: Some($crate::snafu::Backtrace::capture()),
}
}
};
(Request(NotFound($level:ident!($($args:tt)+)))) => {{
let mut buf = String::new();
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::NotFound,
message: $crate::err_log!(buf, $level, $($args)+),
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: None,
}
}};
(Request(NotFound($($args:tt)+))) => {
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::NotFound,
message,
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: None,
}
}
};
(Request($variant:ident($level:ident!($($args:tt)+)))) => {{
let mut buf = String::new();
$crate::error::Error::Request(
$crate::ruma::api::client::error::ErrorKind::$variant,
$crate::err_log!(buf, $level, $($args)+),
$crate::http::StatusCode::BAD_REQUEST
)
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::$variant,
message: $crate::err_log!(buf, $level, $($args)+),
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: Some($crate::snafu::Backtrace::capture()),
}
}};
(Request($variant:ident($($args:tt)+))) => {
$crate::error::Error::Request(
$crate::ruma::api::client::error::ErrorKind::$variant,
$crate::format_maybe!($($args)+),
$crate::http::StatusCode::BAD_REQUEST
)
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::$variant,
message,
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: Some($crate::snafu::Backtrace::capture()),
}
}
};
(Config($item:literal, $($args:tt)+)) => {{
let mut buf = String::new();
$crate::error::Error::Config($item, $crate::err_log!(buf, error, config = %$item, $($args)+))
$crate::error::ConfigSnafu {
directive: $item,
message: $crate::err_log!(buf, error, config = %$item, $($args)+),
}.build()
}};
(BadRequest(ErrorKind::NotFound, $($args:tt)+)) => {
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::Error::Request {
kind: $crate::ruma::api::client::error::ErrorKind::NotFound,
message,
code: $crate::http::StatusCode::BAD_REQUEST,
backtrace: None,
}
}
};
(BadRequest($kind:expr, $($args:tt)+)) => {
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::BadRequestSnafu {
kind: $kind,
message,
}.build()
}
};
(FeatureDisabled($($args:tt)+)) => {
{
let feature: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::FeatureDisabledSnafu { feature }.build()
}
};
(Federation($server:expr, $error:expr $(,)?)) => {
{
$crate::error::FederationSnafu {
server: $server,
error: $error,
}.build()
}
};
(InconsistentRoomState($message:expr, $room_id:expr $(,)?)) => {
{
$crate::error::InconsistentRoomStateSnafu {
message: $message,
room_id: $room_id,
}.build()
}
};
(Uiaa($info:expr $(,)?)) => {
{
$crate::error::UiaaSnafu {
info: $info,
}.build()
}
};
($variant:ident($level:ident!($($args:tt)+))) => {{
let mut buf = String::new();
$crate::error::Error::$variant($crate::err_log!(buf, $level, $($args)+))
$crate::paste::paste! {
$crate::error::[<$variant Snafu>] {
message: $crate::err_log!(buf, $level, $($args)+),
}.build()
}
}};
($variant:ident($($args:ident),+)) => {
$crate::error::Error::$variant($($args),+)
};
($variant:ident($($args:tt)+)) => {
$crate::error::Error::$variant($crate::format_maybe!($($args)+))
$crate::paste::paste! {
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::[<$variant Snafu>] { message }.build()
}
}
};
($level:ident!($($args:tt)+)) => {{
let mut buf = String::new();
$crate::error::Error::Err($crate::err_log!(buf, $level, $($args)+))
let message: std::borrow::Cow<'static, str> = $crate::err_log!(buf, $level, $($args)+);
$crate::error::ErrSnafu { message }.build()
}};
($($args:tt)+) => {
$crate::error::Error::Err($crate::format_maybe!($($args)+))
{
let message: std::borrow::Cow<'static, str> = $crate::format_maybe!($($args)+);
$crate::error::ErrSnafu { message }.build()
}
};
}
@@ -134,7 +233,7 @@ macro_rules! err_log {
};
($crate::error::visit)(&mut $out, LEVEL, &__CALLSITE, &mut valueset_all!(__CALLSITE.metadata().fields(), $($fields)+));
($out).into()
std::borrow::Cow::<'static, str>::from($out)
}}
}

View File

@@ -6,151 +6,391 @@
use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError};
use snafu::{IntoError, prelude::*};
pub use self::{err::visit, log::*};
#[derive(thiserror::Error)]
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[error("PANIC!")]
PanicAny(Box<dyn Any + Send>),
#[error("PANIC! {0}")]
Panic(&'static str, Box<dyn Any + Send + 'static>),
#[snafu(display("PANIC!"))]
PanicAny {
panic: Box<dyn Any + Send>,
backtrace: snafu::Backtrace,
},
#[snafu(display("PANIC! {message}"))]
Panic {
message: &'static str,
panic: Box<dyn Any + Send + 'static>,
backtrace: snafu::Backtrace,
},
// std
#[error(transparent)]
Fmt(#[from] std::fmt::Error),
#[error(transparent)]
FromUtf8(#[from] std::string::FromUtf8Error),
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error(transparent)]
ParseFloat(#[from] std::num::ParseFloatError),
#[error(transparent)]
ParseInt(#[from] std::num::ParseIntError),
#[error(transparent)]
Std(#[from] Box<dyn std::error::Error + Send>),
#[error(transparent)]
ThreadAccessError(#[from] std::thread::AccessError),
#[error(transparent)]
TryFromInt(#[from] std::num::TryFromIntError),
#[error(transparent)]
TryFromSlice(#[from] std::array::TryFromSliceError),
#[error(transparent)]
Utf8(#[from] std::str::Utf8Error),
#[snafu(display("Format error: {source}"))]
Fmt {
source: std::fmt::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("UTF-8 conversion error: {source}"))]
FromUtf8 {
source: std::string::FromUtf8Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("I/O error: {source}"))]
Io {
source: std::io::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Parse float error: {source}"))]
ParseFloat {
source: std::num::ParseFloatError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Parse int error: {source}"))]
ParseInt {
source: std::num::ParseIntError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Error: {source}"))]
Std {
source: Box<dyn std::error::Error + Send>,
backtrace: snafu::Backtrace,
},
#[snafu(display("Thread access error: {source}"))]
ThreadAccessError {
source: std::thread::AccessError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Integer conversion error: {source}"))]
TryFromInt {
source: std::num::TryFromIntError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Slice conversion error: {source}"))]
TryFromSlice {
source: std::array::TryFromSliceError,
backtrace: snafu::Backtrace,
},
#[snafu(display("UTF-8 error: {source}"))]
Utf8 {
source: std::str::Utf8Error,
backtrace: snafu::Backtrace,
},
// third-party
#[error(transparent)]
CapacityError(#[from] arrayvec::CapacityError),
#[error(transparent)]
CargoToml(#[from] cargo_toml::Error),
#[error(transparent)]
Clap(#[from] clap::error::Error),
#[error(transparent)]
Extension(#[from] axum::extract::rejection::ExtensionRejection),
#[error(transparent)]
Figment(#[from] figment::error::Error),
#[error(transparent)]
Http(#[from] http::Error),
#[error(transparent)]
HttpHeader(#[from] http::header::InvalidHeaderValue),
#[error("Join error: {0}")]
JoinError(#[from] tokio::task::JoinError),
#[error(transparent)]
Json(#[from] serde_json::Error),
#[error(transparent)]
JsParseInt(#[from] ruma::JsParseIntError), // js_int re-export
#[error(transparent)]
JsTryFromInt(#[from] ruma::JsTryFromIntError), // js_int re-export
#[error(transparent)]
Path(#[from] axum::extract::rejection::PathRejection),
#[error("Mutex poisoned: {0}")]
Poison(Cow<'static, str>),
#[error("Regex error: {0}")]
Regex(#[from] regex::Error),
#[error("Request error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("{0}")]
SerdeDe(Cow<'static, str>),
#[error("{0}")]
SerdeSer(Cow<'static, str>),
#[error(transparent)]
TomlDe(#[from] toml::de::Error),
#[error(transparent)]
TomlSer(#[from] toml::ser::Error),
#[error("Tracing filter error: {0}")]
TracingFilter(#[from] tracing_subscriber::filter::ParseError),
#[error("Tracing reload error: {0}")]
TracingReload(#[from] tracing_subscriber::reload::Error),
#[error(transparent)]
TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection),
#[error(transparent)]
YamlDe(#[from] serde_saphyr::Error),
#[error(transparent)]
YamlSer(#[from] serde_saphyr::ser_error::Error),
#[snafu(display("Capacity error: {source}"))]
CapacityError {
source: arrayvec::CapacityError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Cargo.toml error: {source}"))]
CargoToml {
source: cargo_toml::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Clap error: {source}"))]
Clap {
source: clap::error::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Extension rejection: {source}"))]
Extension {
source: axum::extract::rejection::ExtensionRejection,
backtrace: snafu::Backtrace,
},
#[snafu(display("Figment error: {source}"))]
Figment {
source: figment::error::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("HTTP error: {source}"))]
Http {
source: http::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Invalid HTTP header value: {source}"))]
HttpHeader {
source: http::header::InvalidHeaderValue,
backtrace: snafu::Backtrace,
},
#[snafu(display("Join error: {source}"))]
JoinError {
source: tokio::task::JoinError,
backtrace: snafu::Backtrace,
},
#[snafu(display("JSON error: {source}"))]
Json {
source: serde_json::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("JS parse int error: {source}"))]
JsParseInt {
source: ruma::JsParseIntError,
backtrace: snafu::Backtrace,
},
#[snafu(display("JS try from int error: {source}"))]
JsTryFromInt {
source: ruma::JsTryFromIntError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Path rejection: {source}"))]
Path {
source: axum::extract::rejection::PathRejection,
backtrace: snafu::Backtrace,
},
#[snafu(display("Mutex poisoned: {message}"))]
Poison {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("Regex error: {source}"))]
Regex {
source: regex::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Request error: {source}"))]
Reqwest {
source: reqwest::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
SerdeDe {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
SerdeSer {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("TOML deserialization error: {source}"))]
TomlDe {
source: toml::de::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("TOML serialization error: {source}"))]
TomlSer {
source: toml::ser::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Tracing filter error: {source}"))]
TracingFilter {
source: tracing_subscriber::filter::ParseError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Tracing reload error: {source}"))]
TracingReload {
source: tracing_subscriber::reload::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Typed header rejection: {source}"))]
TypedHeader {
source: axum_extra::typed_header::TypedHeaderRejection,
backtrace: snafu::Backtrace,
},
#[snafu(display("YAML deserialization error: {source}"))]
YamlDe {
source: serde_saphyr::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("YAML serialization error: {source}"))]
YamlSer {
source: serde_saphyr::ser_error::Error,
backtrace: snafu::Backtrace,
},
// ruma/conduwuit
#[error("Arithmetic operation failed: {0}")]
Arithmetic(Cow<'static, str>),
#[error("{0}: {1}")]
BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove
#[error("{0}")]
BadServerResponse(Cow<'static, str>),
#[error(transparent)]
CanonicalJson(#[from] ruma::CanonicalJsonError),
#[error("There was a problem with the '{0}' directive in your configuration: {1}")]
Config(&'static str, Cow<'static, str>),
#[error("{0}")]
Conflict(Cow<'static, str>), // This is only needed for when a room alias already exists
#[error(transparent)]
ContentDisposition(#[from] ruma::http_headers::ContentDispositionParseError),
#[error("{0}")]
Database(Cow<'static, str>),
#[error("Feature '{0}' is not available on this server.")]
FeatureDisabled(Cow<'static, str>),
#[error("Remote server {0} responded with: {1}")]
Federation(ruma::OwnedServerName, ruma::api::client::error::Error),
#[error("{0} in {1}")]
InconsistentRoomState(&'static str, ruma::OwnedRoomId),
#[error(transparent)]
IntoHttp(#[from] ruma::api::error::IntoHttpError),
#[error("{0}")]
Ldap(Cow<'static, str>),
#[error(transparent)]
Mxc(#[from] ruma::MxcUriError),
#[error(transparent)]
Mxid(#[from] ruma::IdParseError),
#[error("from {0}: {1}")]
Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError),
#[error("{0}: {1}")]
Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode),
#[error(transparent)]
Ruma(#[from] ruma::api::client::error::Error),
#[error(transparent)]
Signatures(#[from] ruma::signatures::Error),
#[error(transparent)]
StateRes(#[from] crate::state_res::Error),
#[error("uiaa")]
Uiaa(ruma::api::client::uiaa::UiaaInfo),
#[snafu(display("Arithmetic operation failed: {message}"))]
Arithmetic {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("{kind}: {message}"))]
BadRequest {
kind: ruma::api::client::error::ErrorKind,
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
BadServerResponse {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("Canonical JSON error: {source}"))]
CanonicalJson {
source: ruma::CanonicalJsonError,
backtrace: snafu::Backtrace,
},
#[snafu(display(
"There was a problem with the '{directive}' directive in your configuration: {message}"
))]
Config {
directive: &'static str,
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
Conflict {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("Content disposition error: {source}"))]
ContentDisposition {
source: ruma::http_headers::ContentDispositionParseError,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
Database {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("Feature '{feature}' is not available on this server."))]
FeatureDisabled {
feature: Cow<'static, str>,
},
#[snafu(display("Remote server {server} responded with: {error}"))]
Federation {
server: ruma::OwnedServerName,
error: ruma::api::client::error::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message} in {room_id}"))]
InconsistentRoomState {
message: &'static str,
room_id: ruma::OwnedRoomId,
backtrace: snafu::Backtrace,
},
#[snafu(display("HTTP conversion error: {source}"))]
IntoHttp {
source: ruma::api::error::IntoHttpError,
backtrace: snafu::Backtrace,
},
#[snafu(display("{message}"))]
Ldap {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
#[snafu(display("MXC URI error: {source}"))]
Mxc {
source: ruma::MxcUriError,
backtrace: snafu::Backtrace,
},
#[snafu(display("Matrix ID parse error: {source}"))]
Mxid {
source: ruma::IdParseError,
backtrace: snafu::Backtrace,
},
#[snafu(display("from {server}: {error}"))]
Redaction {
server: ruma::OwnedServerName,
error: ruma::canonical_json::RedactionError,
backtrace: snafu::Backtrace,
},
#[snafu(display("{kind}: {message}"))]
Request {
kind: ruma::api::client::error::ErrorKind,
message: Cow<'static, str>,
code: http::StatusCode,
backtrace: Option<snafu::Backtrace>,
},
#[snafu(display("Ruma error: {source}"))]
Ruma {
source: ruma::api::client::error::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("Signature error: {source}"))]
Signatures {
source: ruma::signatures::Error,
backtrace: snafu::Backtrace,
},
#[snafu(display("State resolution error: {source}"))]
#[snafu(context(false))]
StateRes {
source: crate::state_res::Error,
},
#[snafu(display("uiaa"))]
Uiaa {
info: ruma::api::client::uiaa::UiaaInfo,
},
// unique / untyped
#[error("{0}")]
Err(Cow<'static, str>),
#[snafu(display("{message}"))]
Err {
message: Cow<'static, str>,
backtrace: snafu::Backtrace,
},
}
impl Error {
#[inline]
#[must_use]
pub fn from_errno() -> Self { Self::Io(std::io::Error::last_os_error()) }
pub fn from_errno() -> Self { IoSnafu {}.into_error(std::io::Error::last_os_error()) }
//#[deprecated]
#[must_use]
pub fn bad_database(message: &'static str) -> Self {
crate::err!(Database(error!("{message}")))
let message: Cow<'static, str> = message.into();
DatabaseSnafu { message }.build()
}
/// Sanitizes public-facing errors that can leak sensitive information.
pub fn sanitized_message(&self) -> String {
match self {
| Self::Database(..) => String::from("Database error occurred."),
| Self::Io(..) => String::from("I/O error occurred."),
| Self::Database { .. } => String::from("Database error occurred."),
| Self::Io { .. } => String::from("I/O error occurred."),
| _ => self.message(),
}
}
@@ -158,8 +398,8 @@ pub fn sanitized_message(&self) -> String {
/// Generate the error message string.
pub fn message(&self) -> String {
match self {
| Self::Federation(origin, error) => format!("Answer from {origin}: {error}"),
| Self::Ruma(error) => response::ruma_error_message(error),
| Self::Federation { server, error, .. } => format!("Answer from {server}: {error}"),
| Self::Ruma { source, .. } => response::ruma_error_message(source),
| _ => format!("{self}"),
}
}
@@ -170,10 +410,10 @@ pub fn kind(&self) -> ruma::api::client::error::ErrorKind {
use ruma::api::client::error::ErrorKind::{FeatureDisabled, Unknown};
match self {
| Self::Federation(_, error) | Self::Ruma(error) =>
response::ruma_error_kind(error).clone(),
| Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(),
| Self::FeatureDisabled(..) => FeatureDisabled,
| Self::Federation { error, .. } => response::ruma_error_kind(error).clone(),
| Self::Ruma { source, .. } => response::ruma_error_kind(source).clone(),
| Self::BadRequest { kind, .. } | Self::Request { kind, .. } => kind.clone(),
| Self::FeatureDisabled { .. } => FeatureDisabled,
| _ => Unknown,
}
}
@@ -184,13 +424,15 @@ pub fn status_code(&self) -> http::StatusCode {
use http::StatusCode;
match self {
| Self::Federation(_, error) | Self::Ruma(error) => error.status_code,
| Self::Request(kind, _, code) => response::status_code(kind, *code),
| Self::BadRequest(kind, ..) => response::bad_request_code(kind),
| Self::FeatureDisabled(..) => response::bad_request_code(&self.kind()),
| Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
| Self::Conflict(_) => StatusCode::CONFLICT,
| Self::Io(error) => response::io_error_code(error.kind()),
| Self::Federation { error, .. } => error.status_code,
| Self::Ruma { source, .. } => source.status_code,
| Self::Request { kind, code, .. } => response::status_code(kind, *code),
| Self::BadRequest { kind, .. } => response::bad_request_code(kind),
| Self::FeatureDisabled { .. } => response::bad_request_code(&self.kind()),
| Self::Reqwest { source, .. } =>
source.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
| Self::Conflict { .. } => StatusCode::CONFLICT,
| Self::Io { source, .. } => response::io_error_code(source.kind()),
| _ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
@@ -203,16 +445,46 @@ pub fn status_code(&self) -> http::StatusCode {
pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND }
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.message())
}
// Debug is already derived by Snafu
/// Generates a `From<$ty> for Error` impl that forwards the source error into
/// the given snafu context selector, capturing a backtrace along the way.
macro_rules! impl_from_snafu {
	($ty:ty => $ctx:ident) => {
		impl From<$ty> for Error {
			fn from(source: $ty) -> Self {
				$ctx.into_error(source)
			}
		}
	};
}
/// Generates a `From<$ty> for Error` impl that renders the source error
/// through the given format string, for message-based contexts such as
/// `ErrSnafu`.
macro_rules! impl_from_message {
	($ty:ty => $ctx:ident, $fmt:expr) => {
		impl From<$ty> for Error {
			fn from(source: $ty) -> Self {
				let rendered: Cow<'static, str> = format!($fmt, source).into();
				$ctx { message: rendered }.build()
			}
		}
	};
}
/// Generates a `From<$ty> for Error` impl that discards the source error and
/// substitutes a fixed message (used when the source carries no useful
/// detail of its own).
macro_rules! impl_from_const_message {
	($ty:ty => $ctx:ident, $msg:expr) => {
		impl From<$ty> for Error {
			fn from(_: $ty) -> Self {
				let text: Cow<'static, str> = $msg.into();
				$ctx { message: text }.build()
			}
		}
	};
}
impl<T> From<PoisonError<T>> for Error {
#[cold]
#[inline(never)]
fn from(e: PoisonError<T>) -> Self { Self::Poison(e.to_string().into()) }
fn from(e: PoisonError<T>) -> Self { PoisonSnafu { message: e.to_string() }.build() }
}
#[allow(clippy::fallible_impl_from)]
@@ -224,6 +496,43 @@ fn from(_e: Infallible) -> Self {
}
}
// Implementations using the macro
impl_from_snafu!(std::io::Error => IoSnafu);
impl_from_snafu!(std::string::FromUtf8Error => FromUtf8Snafu);
impl_from_snafu!(regex::Error => RegexSnafu);
impl_from_snafu!(ruma::http_headers::ContentDispositionParseError => ContentDispositionSnafu);
impl_from_snafu!(ruma::api::error::IntoHttpError => IntoHttpSnafu);
impl_from_snafu!(ruma::JsTryFromIntError => JsTryFromIntSnafu);
impl_from_snafu!(ruma::CanonicalJsonError => CanonicalJsonSnafu);
impl_from_snafu!(axum::extract::rejection::PathRejection => PathSnafu);
impl_from_snafu!(clap::error::Error => ClapSnafu);
impl_from_snafu!(ruma::MxcUriError => MxcSnafu);
impl_from_snafu!(serde_saphyr::ser_error::Error => YamlSerSnafu);
impl_from_snafu!(toml::de::Error => TomlDeSnafu);
impl_from_snafu!(http::header::InvalidHeaderValue => HttpHeaderSnafu);
impl_from_snafu!(serde_json::Error => JsonSnafu);
// Custom implementations using message formatting
impl_from_const_message!(std::fmt::Error => ErrSnafu, "formatting error");
impl_from_message!(std::str::Utf8Error => ErrSnafu, "UTF-8 error: {}");
impl_from_message!(std::num::TryFromIntError => ArithmeticSnafu, "integer conversion error: {}");
impl_from_message!(tracing_subscriber::reload::Error => ErrSnafu, "tracing reload error: {}");
impl_from_message!(reqwest::Error => ErrSnafu, "HTTP client error: {}");
impl_from_message!(ruma::signatures::Error => ErrSnafu, "Signature error: {}");
impl_from_message!(ruma::IdParseError => ErrSnafu, "ID parse error: {}");
impl_from_message!(std::num::ParseIntError => ErrSnafu, "Integer parse error: {}");
impl_from_message!(std::array::TryFromSliceError => ErrSnafu, "Slice conversion error: {}");
impl_from_message!(tokio::task::JoinError => ErrSnafu, "Task join error: {}");
impl_from_message!(serde_saphyr::Error => ErrSnafu, "YAML error: {}");
// Generic implementation for CapacityError
impl<T> From<arrayvec::CapacityError<T>> for Error {
fn from(_source: arrayvec::CapacityError<T>) -> Self {
let message: Cow<'static, str> = "capacity error: buffer is full".into();
ErrSnafu { message }.build()
}
}
#[cold]
#[inline(never)]
pub fn infallible(_e: &Infallible) {

View File

@@ -15,13 +15,16 @@ pub fn panic(self) -> ! { panic_any(self.into_panic()) }
#[must_use]
#[inline]
pub fn from_panic(e: Box<dyn Any + Send>) -> Self { Self::Panic(debug::panic_str(&e), e) }
pub fn from_panic(e: Box<dyn Any + Send>) -> Self {
use super::PanicSnafu;
PanicSnafu { message: debug::panic_str(&e), panic: e }.build()
}
#[inline]
pub fn into_panic(self) -> Box<dyn Any + Send + 'static> {
match self {
| Self::Panic(_, e) | Self::PanicAny(e) => e,
| Self::JoinError(e) => e.into_panic(),
| Self::Panic { panic, .. } | Self::PanicAny { panic, .. } => panic,
| Self::JoinError { source, .. } => source.into_panic(),
| _ => Box::new(self),
}
}
@@ -37,8 +40,8 @@ pub fn panic_str(self) -> Option<&'static str> {
#[inline]
pub fn is_panic(&self) -> bool {
match &self {
| Self::Panic(..) | Self::PanicAny(..) => true,
| Self::JoinError(e) => e.is_panic(),
| Self::Panic { .. } | Self::PanicAny { .. } => true,
| Self::JoinError { source, .. } => source.is_panic(),
| _ => false,
}
}

View File

@@ -47,8 +47,8 @@ fn into_response(self) -> axum::response::Response {
impl From<Error> for UiaaResponse {
#[inline]
fn from(error: Error) -> Self {
if let Error::Uiaa(uiaainfo) = error {
return Self::AuthResponse(uiaainfo);
if let Error::Uiaa { info, .. } = error {
return Self::AuthResponse(info);
}
let body = ErrorBody::Standard {

View File

@@ -5,9 +5,15 @@
use crate::Error;
impl de::Error for Error {
fn custom<T: Display + ToString>(msg: T) -> Self { Self::SerdeDe(msg.to_string().into()) }
fn custom<T: Display + ToString>(msg: T) -> Self {
let message: std::borrow::Cow<'static, str> = msg.to_string().into();
super::SerdeDeSnafu { message }.build()
}
}
impl ser::Error for Error {
fn custom<T: Display + ToString>(msg: T) -> Self { Self::SerdeSer(msg.to_string().into()) }
fn custom<T: Display + ToString>(msg: T) -> Self {
let message: std::borrow::Cow<'static, str> = msg.to_string().into();
super::SerdeSerSnafu { message }.build()
}
}

View File

@@ -14,7 +14,6 @@
static VERSION: OnceLock<String> = OnceLock::new();
static VERSION_UA: OnceLock<String> = OnceLock::new();
static USER_AGENT: OnceLock<String> = OnceLock::new();
static USER_AGENT_MEDIA: OnceLock<String> = OnceLock::new();
#[inline]
#[must_use]
@@ -22,22 +21,14 @@ pub fn name() -> &'static str { BRANDING }
#[inline]
pub fn version() -> &'static str { VERSION.get_or_init(init_version) }
#[inline]
pub fn version_ua() -> &'static str { VERSION_UA.get_or_init(init_version_ua) }
#[inline]
pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) }
#[inline]
pub fn user_agent_media() -> &'static str { USER_AGENT_MEDIA.get_or_init(init_user_agent_media) }
fn init_user_agent() -> String { format!("{}/{} (bot; +{WEBSITE})", name(), version_ua()) }
fn init_user_agent_media() -> String {
format!("{}/{} (embedbot; facebookexternalhit/1.1; +{WEBSITE})", name(), version_ua())
}
fn init_version_ua() -> String {
conduwuit_build_metadata::version_tag()
.map_or_else(|| SEMANTIC.to_owned(), |extra| format!("{SEMANTIC}+{extra}"))

View File

@@ -1,7 +1,7 @@
use ruma::{RoomVersionId, canonical_json::redact_content_in_place};
use serde_json::{Value as JsonValue, json, value::to_raw_value};
use crate::{Error, Result, err, implement};
use crate::{Result, err, implement};
#[implement(super::Pdu)]
pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: JsonValue) -> Result {
@@ -10,8 +10,15 @@ pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: JsonValue) ->
let mut content = serde_json::from_str(self.content.get())
.map_err(|e| err!(Request(BadJson("Failed to deserialize content into type: {e}"))))?;
redact_content_in_place(&mut content, room_version_id, self.kind.to_string())
.map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?;
redact_content_in_place(&mut content, room_version_id, self.kind.to_string()).map_err(
|error| {
crate::error::RedactionSnafu {
server: self.sender.server_name().to_owned(),
error,
}
.build()
},
)?;
let reason = serde_json::to_value(reason).expect("Failed to preserialize reason");

View File

@@ -27,7 +27,7 @@
use crate::{
matrix::{Event, Pdu, pdu::EventHash},
state_res::{self as state_res, Error, Result, StateMap},
state_res::{self as state_res, Error, Result, StateMap, error::NotFoundSnafu},
};
static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0);
@@ -170,10 +170,12 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) {
#[allow(unused)]
impl<E: Event + Clone> TestStore<E> {
fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result<E> {
self.0
.get(event_id)
.cloned()
.ok_or_else(|| Error::NotFound(format!("{} not found", event_id)))
self.0.get(event_id).cloned().ok_or_else(|| {
NotFoundSnafu {
message: format!("{} not found", event_id),
}
.build()
})
}
/// Returns the events that correspond to the `event_ids` sorted in the same

View File

@@ -1,23 +1,40 @@
use serde_json::Error as JsonError;
use thiserror::Error;
use snafu::{IntoError, prelude::*};
/// Represents the various errors that arise when resolving state.
#[derive(Error, Debug)]
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
#[non_exhaustive]
pub enum Error {
/// A deserialization error.
#[error(transparent)]
SerdeJson(#[from] JsonError),
#[snafu(display("JSON error: {source}"))]
SerdeJson {
source: JsonError,
backtrace: snafu::Backtrace,
},
/// The given option or version is unsupported.
#[error("Unsupported room version: {0}")]
Unsupported(String),
#[snafu(display("Unsupported room version: {version}"))]
Unsupported {
version: String,
backtrace: snafu::Backtrace,
},
/// The given event was not found.
#[error("Not found error: {0}")]
NotFound(String),
#[snafu(display("Not found error: {message}"))]
NotFound {
message: String,
backtrace: snafu::Backtrace,
},
/// Invalid fields in the given PDU.
#[error("Invalid PDU: {0}")]
InvalidPdu(String),
#[snafu(display("Invalid PDU: {message}"))]
InvalidPdu {
message: String,
backtrace: snafu::Backtrace,
},
}
// Manual `From` so `?` still converts serde_json failures into the
// `SerdeJson` variant; the context selector wraps the source and fills the
// variant's `backtrace` field.
impl From<serde_json::Error> for Error {
	fn from(source: serde_json::Error) -> Self { SerdeJsonSnafu.into_error(source) }
}

View File

@@ -24,6 +24,7 @@
use super::{
Error, Event, Result, StateEventType, StateKey, TimelineEventType,
error::InvalidPduSnafu,
power_levels::{
deserialize_power_levels, deserialize_power_levels_content_fields,
deserialize_power_levels_content_invite, deserialize_power_levels_content_redact,
@@ -383,8 +384,8 @@ pub async fn auth_check<E, F, Fut>(
return Ok(false);
}
let target_user =
<&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?;
let target_user = <&UserId>::try_from(state_key)
.map_err(|e| InvalidPduSnafu { message: format!("{e}") }.build())?;
let user_for_join_auth = content
.join_authorised_via_users_server
@@ -461,7 +462,7 @@ pub async fn auth_check<E, F, Fut>(
?sender_membership_event_content,
"Sender membership event content missing membership field"
);
return Err(Error::InvalidPdu("Missing membership field".to_owned()));
return Err(InvalidPduSnafu { message: "Missing membership field" }.build());
};
let membership_state = membership_state.deserialize()?;

View File

@@ -29,18 +29,18 @@
};
use serde_json::from_str as from_json_str;
pub(crate) use self::error::Error;
pub(crate) use self::error::{Error, InvalidPduSnafu, NotFoundSnafu};
use self::power_levels::PowerLevelsContentFields;
pub use self::{
event_auth::{auth_check, auth_types_for_event},
room_version::RoomVersion,
};
use super::{Event, StateKey};
use crate::{
debug, debug_error, err,
matrix::{Event, StateKey},
debug, debug_error,
state_res::room_version::StateResolutionVersion,
trace,
utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt},
utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt},
warn,
};
@@ -118,7 +118,10 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
let csg = calculate_conflicted_subgraph(&conflicting, event_fetch)
.await
.ok_or_else(|| {
Error::InvalidPdu("Failed to calculate conflicted subgraph".to_owned())
InvalidPduSnafu {
message: "Failed to calculate conflicted subgraph",
}
.build()
})?;
debug!(count = csg.len(), "conflicted subgraph");
trace!(set = ?csg, "conflicted subgraph");
@@ -149,10 +152,11 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
let control_events: Vec<_> = all_conflicted
.iter()
.stream()
.wide_filter_map(async |id| {
is_power_event_id(id, &event_fetch)
.broad_filter_map(async |id| {
event_fetch(id.clone())
.await
.then_some(id.clone())
.filter(|event| is_power_event(&event))
.map(|_| id.clone())
})
.collect()
.await;
@@ -314,7 +318,10 @@ async fn calculate_conflicted_subgraph<F, Fut, E>(
trace!(event_id = event_id.as_str(), "fetching event for its auth events");
let evt = fetch_event(event_id.clone()).await;
if evt.is_none() {
err!("could not fetch event {} to calculate conflicted subgraph", event_id);
tracing::error!(
"could not fetch event {} to calculate conflicted subgraph",
event_id
);
path.pop();
continue;
}
@@ -402,11 +409,11 @@ async fn reverse_topological_power_sort<E, F, Fut>(
let fetcher = async |event_id: OwnedEventId| {
let pl = *event_to_pl
.get(&event_id)
.ok_or_else(|| Error::NotFound(String::new()))?;
.ok_or_else(|| NotFoundSnafu { message: "" }.build())?;
let ev = fetch_event(event_id)
.await
.ok_or_else(|| Error::NotFound(String::new()))?;
.ok_or_else(|| NotFoundSnafu { message: "" }.build())?;
Ok((pl, ev.origin_server_ts()))
};
@@ -612,9 +619,12 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
let events_to_check: Vec<_> = events_to_check
.map(Result::Ok)
.broad_and_then(async |event_id| {
fetch_event(event_id.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
fetch_event(event_id.to_owned()).await.ok_or_else(|| {
NotFoundSnafu {
message: format!("Failed to find {event_id}"),
}
.build()
})
})
.try_collect()
.boxed()
@@ -653,7 +663,7 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
trace!(event_id = event.event_id().as_str(), "checking event");
let state_key = event
.state_key()
.ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?;
.ok_or_else(|| InvalidPduSnafu { message: "State event had no state key" }.build())?;
let auth_types = auth_types_for_event(
event.event_type(),
@@ -669,13 +679,14 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
trace!("room version uses hashed IDs, manually fetching create event");
let create_event_id_raw = event.room_id_or_hash().as_str().replace('!', "$");
let create_event_id = EventId::parse(&create_event_id_raw).map_err(|e| {
Error::InvalidPdu(format!(
"Failed to parse create event ID from room ID/hash: {e}"
))
InvalidPduSnafu {
message: format!("Failed to parse create event ID from room ID/hash: {e}"),
}
.build()
})?;
let create_event = fetch_event(create_event_id.into()).await.ok_or_else(|| {
NotFoundSnafu { message: "Failed to find create event" }.build()
})?;
let create_event = fetch_event(create_event_id.into())
.await
.ok_or_else(|| Error::NotFound("Failed to find create event".into()))?;
auth_state.insert(create_event.event_type().with_state_key(""), create_event);
}
for aid in event.auth_events() {
@@ -686,7 +697,7 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
auth_state.insert(
ev.event_type()
.with_state_key(ev.state_key().ok_or_else(|| {
Error::InvalidPdu("State event had no state key".to_owned())
InvalidPduSnafu { message: "State event had no state key" }.build()
})?),
ev.clone(),
);
@@ -801,13 +812,13 @@ async fn mainline_sort<E, F, Fut>(
let event = fetch_event(p.clone())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?;
.ok_or_else(|| NotFoundSnafu { message: format!("Failed to find {p}") }.build())?;
pl = None;
for aid in event.auth_events() {
let ev = fetch_event(aid.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
let ev = fetch_event(aid.to_owned()).await.ok_or_else(|| {
NotFoundSnafu { message: format!("Failed to find {aid}") }.build()
})?;
if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") {
pl = Some(aid.to_owned());
@@ -869,9 +880,9 @@ async fn get_mainline_depth<E, F, Fut>(
event = None;
for aid in sort_ev.auth_events() {
let aev = fetch_event(aid.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
let aev = fetch_event(aid.to_owned()).await.ok_or_else(|| {
NotFoundSnafu { message: format!("Failed to find {aid}") }.build()
})?;
if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") {
event = Some(aev);
@@ -915,6 +926,7 @@ async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
}
}
#[allow(dead_code)]
async fn is_power_event_id<E, F, Fut>(event_id: &EventId, fetch: &F) -> bool
where
F: Fn(OwnedEventId) -> Fut + Sync,

View File

@@ -1,6 +1,6 @@
use ruma::RoomVersionId;
use super::{Error, Result};
use super::{Result, error::UnsupportedSnafu};
#[derive(Debug)]
#[allow(clippy::exhaustive_enums)]
@@ -163,7 +163,11 @@ pub fn new(version: &RoomVersionId) -> Result<Self> {
| RoomVersionId::V10 => Self::V10,
| RoomVersionId::V11 => Self::V11,
| RoomVersionId::V12 => Self::V12,
| ver => return Err(Error::Unsupported(format!("found version `{ver}`"))),
| ver =>
return Err(UnsupportedSnafu {
version: format!("found version `{ver}`"),
}
.build()),
})
}
}

View File

@@ -22,7 +22,7 @@
value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value},
};
use super::auth_types_for_event;
use super::{auth_types_for_event, error::NotFoundSnafu};
use crate::{
Result, RoomVersion, info,
matrix::{Event, EventTypeExt, Pdu, StateMap, pdu::EventHash},
@@ -232,7 +232,7 @@ pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result<E> {
self.0
.get(event_id)
.cloned()
.ok_or_else(|| super::Error::NotFound(format!("{event_id} not found")))
.ok_or_else(|| NotFoundSnafu { message: format!("{event_id} not found") }.build())
.map_err(Into::into)
}

View File

@@ -14,9 +14,11 @@
pub use ::arrayvec;
pub use ::http;
pub use ::paste;
pub use ::ruma;
pub use ::smallstr;
pub use ::smallvec;
pub use ::snafu;
pub use ::toml;
pub use ::tracing;
pub use config::Config;

View File

@@ -11,7 +11,6 @@
pub mod math;
pub mod mutex_map;
pub mod rand;
pub mod response;
pub mod result;
pub mod set;
pub mod stream;

View File

@@ -1,51 +0,0 @@
use futures::StreamExt;
use num_traits::ToPrimitive;
use crate::Err;
/// Reads the response body while enforcing a maximum size limit to prevent
/// memory exhaustion.
///
/// Rejects early when the declared `Content-Length` already exceeds
/// `max_size`, then re-checks while streaming, since the header may be
/// absent or wrong.
///
/// # Errors
/// Returns `BadServerResponse` when the body exceeds `max_size`, or
/// propagates any stream error from the underlying connection.
pub async fn limit_read(response: reqwest::Response, max_size: u64) -> crate::Result<Vec<u8>> {
	if response.content_length().is_some_and(|len| len > max_size) {
		return Err!(BadServerResponse("Response too large"));
	}

	let mut data = Vec::new();
	let mut reader = response.bytes_stream();
	while let Some(chunk) = reader.next().await {
		let chunk = chunk?;
		data.extend_from_slice(&chunk);
		// Compare in u64 so an oversized body is always an error, never a
		// panic: the previous `max_size.to_usize().expect(..)` would panic on
		// 32-bit targets for any limit above usize::MAX. Widening len is
		// lossless, and this also hoists the per-iteration conversion.
		if data.len() as u64 > max_size {
			return Err!(BadServerResponse("Response too large"));
		}
	}

	Ok(data)
}
/// Reads the response body as UTF-8 text, subject to the same size cap as
/// [`limit_read`].
///
/// # Errors
/// Propagates [`limit_read`] failures, plus a conversion error when the body
/// is not valid UTF-8.
pub async fn limit_read_text(
	response: reqwest::Response,
	max_size: u64,
) -> crate::Result<String> {
	let bytes = limit_read(response, max_size).await?;
	Ok(String::from_utf8(bytes)?)
}
#[allow(async_fn_in_trait)]
pub trait LimitReadExt {
	/// Size-capped body read; see [`limit_read`].
	async fn limit_read(self, max_size: u64) -> crate::Result<Vec<u8>>;
	/// Size-capped text read; see [`limit_read_text`].
	async fn limit_read_text(self, max_size: u64) -> crate::Result<String>;
}
// Method-call sugar over the free functions above, usable directly on a
// `reqwest::Response`.
impl LimitReadExt for reqwest::Response {
	async fn limit_read(self, max_size: u64) -> crate::Result<Vec<u8>> {
		limit_read(self, max_size).await
	}
	async fn limit_read_text(self, max_size: u64) -> crate::Result<String> {
		limit_read_text(self, max_size).await
	}
}

View File

@@ -3,17 +3,19 @@
stream::{Stream, TryStream},
};
use crate::{Error, Result};
pub trait IterStream<I: IntoIterator + Send> {
/// Convert an Iterator into a Stream
fn stream(self) -> impl Stream<Item = <I as IntoIterator>::Item> + Send;
/// Convert an Iterator into a TryStream with a generic error type
fn try_stream<E>(
/// Convert an Iterator into a TryStream
fn try_stream(
self,
) -> impl TryStream<
Ok = <I as IntoIterator>::Item,
Error = E,
Item = Result<<I as IntoIterator>::Item, E>,
Error = Error,
Item = Result<<I as IntoIterator>::Item, Error>,
> + Send;
}
@@ -26,12 +28,12 @@ impl<I> IterStream<I> for I
fn stream(self) -> impl Stream<Item = <I as IntoIterator>::Item> + Send { stream::iter(self) }
#[inline]
fn try_stream<E>(
fn try_stream(
self,
) -> impl TryStream<
Ok = <I as IntoIterator>::Item,
Error = E,
Item = Result<<I as IntoIterator>::Item, E>,
Error = Error,
Item = Result<<I as IntoIterator>::Item, Error>,
> + Send {
self.stream().map(Ok)
}

View File

@@ -1,10 +1,9 @@
//! Synchronous combinator extensions to futures::TryStream
use std::result::Result;
use futures::{TryFuture, TryStream, TryStreamExt};
use super::automatic_width;
use crate::Result;
/// Concurrency extensions to augment futures::TryStreamExt. broad_ combinators
/// produce out-of-order

View File

@@ -2,6 +2,8 @@
use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock};
use snafu::IntoError;
use crate::{Result, is_equal_to};
type Id = usize;
@@ -142,7 +144,9 @@ pub fn getcpu() -> Result<usize> {
#[cfg(not(target_os = "linux"))]
#[inline]
pub fn getcpu() -> Result<usize> { Err(crate::Error::Io(std::io::ErrorKind::Unsupported.into())) }
pub fn getcpu() -> Result<usize> {
Err(crate::error::IoSnafu.into_error(std::io::ErrorKind::Unsupported.into()))
}
fn query_cores_available() -> impl Iterator<Item = Id> {
core_affinity::get_core_ids()

View File

@@ -255,7 +255,10 @@ fn deserialize_newtype_struct<V>(self, name: &'static str, visitor: V) -> Result
| "$serde_json::private::RawValue" => visitor.visit_map(self),
| "Cbor" => visitor
.visit_newtype_struct(&mut minicbor_serde::Deserializer::new(self.record_trail()))
.map_err(|e| Self::Error::SerdeDe(e.to_string().into())),
.map_err(|e| {
let message: std::borrow::Cow<'static, str> = e.to_string().into();
conduwuit_core::error::SerdeDeSnafu { message }.build()
}),
| _ => visitor.visit_newtype_struct(self),
}
@@ -313,9 +316,10 @@ fn deserialize_i64<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
let end = self.pos.saturating_add(BYTES).min(self.buf.len());
let bytes: ArrayVec<u8, BYTES> = self.buf[self.pos..end].try_into()?;
let bytes = bytes
.into_inner()
.map_err(|_| Self::Error::SerdeDe("i64 buffer underflow".into()))?;
let bytes = bytes.into_inner().map_err(|_| {
let message: std::borrow::Cow<'static, str> = "i64 buffer underflow".into();
conduwuit_core::error::SerdeDeSnafu { message }.build()
})?;
self.inc_pos(BYTES);
visitor.visit_i64(i64::from_be_bytes(bytes))
@@ -345,9 +349,10 @@ fn deserialize_u64<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
let end = self.pos.saturating_add(BYTES).min(self.buf.len());
let bytes: ArrayVec<u8, BYTES> = self.buf[self.pos..end].try_into()?;
let bytes = bytes
.into_inner()
.map_err(|_| Self::Error::SerdeDe("u64 buffer underflow".into()))?;
let bytes = bytes.into_inner().map_err(|_| {
let message: std::borrow::Cow<'static, str> = "u64 buffer underflow".into();
conduwuit_core::error::SerdeDeSnafu { message }.build()
})?;
self.inc_pos(BYTES);
visitor.visit_u64(u64::from_be_bytes(bytes))

View File

@@ -362,10 +362,6 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "userid_blurhash",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userid_dehydrateddevice",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userid_devicelistversion",
..descriptor::RANDOM_SMALL

View File

@@ -199,7 +199,10 @@ fn serialize_newtype_struct<T>(self, name: &'static str, value: &T) -> Result<Se
value
.serialize(&mut Serializer::new(&mut Writer::new(&mut self.out)))
.map_err(|e| Self::Error::SerdeSer(e.to_string().into()))
.map_err(|e| {
let message: std::borrow::Cow<'static, str> = e.to_string().into();
conduwuit_core::error::SerdeSerSnafu { message }.build()
})
},
| _ => unhandled!("Unrecognized serialization Newtype {name:?}"),
}

View File

@@ -1,4 +1,4 @@
use std::sync::Arc;
use std::{borrow::Cow, sync::Arc};
use axum::{Router, response::IntoResponse};
use conduwuit::Error;
@@ -18,5 +18,10 @@ pub(crate) fn build(services: &Arc<Services>) -> (Router, Guard) {
}
async fn not_found(_uri: Uri) -> impl IntoResponse {
Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND)
Error::Request {
kind: ErrorKind::Unrecognized,
message: Cow::Borrowed("Not Found"),
code: StatusCode::NOT_FOUND,
backtrace: None,
}
}

View File

@@ -530,12 +530,7 @@ async fn handle_response_error(
Ok(())
}
pub async fn is_admin_command<E>(
&self,
event: &E,
body: &str,
sent_locally: bool,
) -> Option<InvocationSource>
pub async fn is_admin_command<E>(&self, event: &E, body: &str) -> Option<InvocationSource>
where
E: Event + Send + Sync,
{
@@ -585,15 +580,6 @@ pub async fn is_admin_command<E>(
return None;
}
// Escaped commands must be sent locally (via client API), not via federation
if !sent_locally {
conduwuit::warn!(
"Ignoring escaped admin command from {} that arrived via federation",
event.sender()
);
return None;
}
// Looks good
Some(InvocationSource::EscapedCommand)
}

View File

@@ -18,7 +18,7 @@
use std::{sync::Arc, time::Duration};
use async_trait::async_trait;
use conduwuit::{Result, Server, debug, error, utils::response::LimitReadExt, warn};
use conduwuit::{Result, Server, debug, error, warn};
use database::{Deserialized, Map};
use ruma::events::{Mentions, room::message::RoomMessageEventContent};
use serde::Deserialize;
@@ -137,7 +137,7 @@ async fn check(&self) -> Result<()> {
.get(CHECK_FOR_ANNOUNCEMENTS_URL)
.send()
.await?
.limit_read_text(1024 * 1024)
.text()
.await?;
let response = serde_json::from_str::<CheckForAnnouncementsResponse>(&response)?;

View File

@@ -147,11 +147,11 @@ pub async fn register_appservice(
// same appservice)
if let Ok(existing) = self.find_from_token(&registration.as_token).await {
if existing.registration.id != registration.id {
return Err(err!(Request(InvalidParam(
return Err!(Request(InvalidParam(
"Cannot register appservice: Token is already used by appservice '{}'. \
Please generate a different token.",
existing.registration.id
))));
)));
}
}
@@ -163,10 +163,10 @@ pub async fn register_appservice(
.await
.is_ok()
{
return Err(err!(Request(InvalidParam(
return Err!(Request(InvalidParam(
"Cannot register appservice: The provided token is already in use by a user \
device. Please generate a different token for the appservice."
))));
)));
}
self.db

View File

@@ -39,7 +39,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let url_preview_user_agent = config
.url_preview_user_agent
.clone()
.unwrap_or_else(|| conduwuit::version::user_agent_media().to_owned());
.unwrap_or_else(|| conduwuit::version::user_agent().to_owned());
Ok(Arc::new(Self {
default: base(config)?

View File

@@ -2,8 +2,8 @@
use bytes::Bytes;
use conduwuit::{
Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, implement,
trace, utils::response::LimitReadExt,
Err, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err,
error::inspect_debug_log, implement, trace,
};
use http::{HeaderValue, header::AUTHORIZATION};
use ipaddress::IPAddress;
@@ -133,22 +133,7 @@ async fn handle_response<T>(
where
T: OutgoingRequest + Send,
{
const HUGE_ENDPOINTS: [&str; 2] =
["/_matrix/federation/v2/send_join/", "/_matrix/federation/v2/state/"];
let size_limit: u64 = if HUGE_ENDPOINTS.iter().any(|e| url.path().starts_with(e)) {
// Some federation endpoints can return huge response bodies, so we'll bump the
// limit for those endpoints specifically.
self.services
.server
.config
.max_request_size
.saturating_mul(10)
} else {
self.services.server.config.max_request_size
}
.try_into()
.expect("size_limit (usize) should fit within a u64");
let response = into_http_response(dest, actual, method, url, response, size_limit).await?;
let response = into_http_response(dest, actual, method, url, response).await?;
T::IncomingResponse::try_from_http_response(response)
.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}")))
@@ -160,7 +145,6 @@ async fn into_http_response(
method: &Method,
url: &Url,
mut response: Response,
max_size: u64,
) -> Result<http::Response<Bytes>> {
let status = response.status();
trace!(
@@ -183,22 +167,19 @@ async fn into_http_response(
);
trace!("Waiting for response body...");
let body = response
.bytes()
.await
.inspect_err(inspect_debug_log)
.unwrap_or_else(|_| Vec::new().into());
let http_response = http_response_builder
.body(
response
.limit_read(max_size)
.await
.unwrap_or_default()
.into(),
)
.body(body)
.expect("reqwest body is valid http body");
debug!("Got {status:?} for {method} {url}");
if !status.is_success() {
return Err(Error::Federation(
dest.to_owned(),
RumaError::from_http_response(http_response),
));
return Err!(Federation(dest.to_owned(), RumaError::from_http_response(http_response),));
}
Ok(http_response)

View File

@@ -170,8 +170,6 @@ pub(super) fn remove_url_preview(&self, url: &str) -> Result<()> {
Ok(())
}
pub(super) async fn clear_url_previews(&self) { self.url_previews.clear().await; }
pub(super) fn set_url_preview(
&self,
url: &str,

View File

@@ -7,7 +7,7 @@
use std::time::SystemTime;
use conduwuit::{Err, Result, debug, err, utils::response::LimitReadExt};
use conduwuit::{Err, Result, debug, err};
use conduwuit_core::implement;
use ipaddress::IPAddress;
use serde::Serialize;
@@ -37,9 +37,6 @@ pub async fn remove_url_preview(&self, url: &str) -> Result<()> {
self.db.remove_url_preview(url)
}
#[implement(Service)]
pub async fn clear_url_previews(&self) { self.db.clear_url_previews().await; }
#[implement(Service)]
pub async fn set_url_preview(&self, url: &str, data: &UrlPreviewData) -> Result<()> {
let now = SystemTime::now()
@@ -112,22 +109,8 @@ pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> {
use image::ImageReader;
use ruma::Mxc;
let image = self
.services
.client
.url_preview
.get(url)
.send()
.await?
.limit_read(
self.services
.server
.config
.max_request_size
.try_into()
.expect("u64 should fit in usize"),
)
.await?;
let image = self.services.client.url_preview.get(url).send().await?;
let image = image.bytes().await?;
let mxc = Mxc {
server_name: self.services.globals.server_name(),
media_id: &random_string(super::MXC_LENGTH),
@@ -165,20 +148,24 @@ async fn download_html(&self, url: &str) -> Result<UrlPreviewData> {
use webpage::HTML;
let client = &self.services.client.url_preview;
let body = client
.get(url)
.send()
.await?
.limit_read_text(
self.services
.server
.config
.max_request_size
.try_into()
.expect("u64 should fit in usize"),
)
.await?;
let Ok(html) = HTML::from_string(body.clone(), Some(url.to_owned())) else {
let mut response = client.get(url).send().await?;
let mut bytes: Vec<u8> = Vec::new();
while let Some(chunk) = response.chunk().await? {
bytes.extend_from_slice(&chunk);
if bytes.len() > self.services.globals.url_preview_max_spider_size() {
debug!(
"Response body from URL {} exceeds url_preview_max_spider_size ({}), not \
processing the rest of the response body and assuming our necessary data is in \
this range.",
url,
self.services.globals.url_preview_max_spider_size()
);
break;
}
}
let body = String::from_utf8_lossy(&bytes);
let Ok(html) = HTML::from_string(body.to_string(), Some(url.to_owned())) else {
return Err!(Request(Unknown("Failed to parse HTML")));
};

View File

@@ -2,7 +2,7 @@
use conduwuit::{
Err, Error, Result, debug_warn, err, implement,
utils::{content_disposition::make_content_disposition, response::LimitReadExt},
utils::content_disposition::make_content_disposition,
};
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue};
use ruma::{
@@ -35,7 +35,7 @@ pub async fn fetch_remote_thumbnail(
.fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim)
.await;
if let Err(Error::Request(NotFound, ..)) = &result {
if let Err(Error::Request { kind: NotFound, .. }) = &result {
return self
.fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim)
.await;
@@ -67,7 +67,7 @@ pub async fn fetch_remote_content(
);
});
if let Err(Error::Request(Unrecognized, ..)) = &result {
if let Err(Error::Request { kind: Unrecognized, .. }) = &result {
return self
.fetch_content_unauthenticated(mxc, user, server, timeout_ms)
.await;
@@ -286,15 +286,10 @@ async fn location_request(&self, location: &str) -> Result<FileMeta> {
.and_then(Result::ok);
response
.limit_read(
self.services
.server
.config
.max_request_size
.try_into()
.expect("u64 should fit in usize"),
)
.bytes()
.await
.map(Vec::from)
.map_err(Into::into)
.map(|content| FileMeta {
content: Some(content),
content_type: content_type.clone(),

View File

@@ -31,7 +31,7 @@
pub mod sending;
pub mod server_keys;
pub mod sync;
pub mod transactions;
pub mod transaction_ids;
pub mod uiaa;
pub mod users;

View File

@@ -1,7 +1,6 @@
use std::{fmt::Debug, mem, sync::Arc};
use bytes::BytesMut;
use conduwuit::utils::response::LimitReadExt;
use conduwuit_core::{
Err, Event, Result, debug_warn, err, trace,
utils::{stream::TryIgnore, string_from_bytes},
@@ -31,7 +30,7 @@
uint,
};
use crate::{Dep, client, config, globals, rooms, sending, users};
use crate::{Dep, client, globals, rooms, sending, users};
pub struct Service {
db: Data,
@@ -40,7 +39,6 @@ pub struct Service {
struct Services {
globals: Dep<globals::Service>,
config: Dep<config::Service>,
client: Dep<client::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
state_cache: Dep<rooms::state_cache::Service>,
@@ -63,7 +61,6 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
services: Services {
globals: args.depend::<globals::Service>("globals"),
client: args.depend::<client::Service>("client"),
config: args.depend::<config::Service>("config"),
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
@@ -248,15 +245,7 @@ pub async fn send_request<T>(&self, dest: &str, request: T) -> Result<T::Incomin
.expect("http::response::Builder is usable"),
);
let body = response
.limit_read(
self.services
.config
.max_request_size
.try_into()
.expect("usize fits into u64"),
)
.await?;
let body = response.bytes().await?;
if !status.is_success() {
debug_warn!("Push gateway response body: {:?}", string_from_bytes(&body));

View File

@@ -1,6 +1,4 @@
use conduwuit::{
Result, debug, debug_error, debug_info, implement, trace, utils::response::LimitReadExt,
};
use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace};
#[implement(super::Service)]
#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))]
@@ -26,8 +24,12 @@ pub(super) async fn request_well_known(&self, dest: &str) -> Result<Option<Strin
return Ok(None);
}
let text = response.limit_read_text(8192).await?;
let text = response.text().await?;
trace!("response text: {text:?}");
if text.len() >= 12288 {
debug_warn!("response contains junk");
return Ok(None);
}
let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default();

View File

@@ -142,7 +142,7 @@ async fn get_auth_chain_outer(
let chunk_cache: Vec<_> = chunk
.into_iter()
.try_stream::<conduwuit::Error>()
.try_stream()
.broad_and_then(|(shortid, event_id)| async move {
if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await {
return Ok(cached.to_vec());

View File

@@ -63,9 +63,7 @@ pub(super) async fn fetch_state<Pdu>(
},
| hash_map::Entry::Occupied(_) => {
return Err!(Database(
"State event's type and state_key combination exists multiple times: {}, {}",
pdu.kind(),
state_key
"State event's type and state_key combination exists multiple times.",
));
},
}

View File

@@ -162,9 +162,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
},
| hash_map::Entry::Occupied(_) => {
return Err!(Request(InvalidParam(
"Auth event's type and state_key combination exists multiple times: {}, {}",
auth_event.kind,
auth_event.state_key().unwrap_or("")
"Auth event's type and state_key combination exists multiple times.",
)));
},
}

View File

@@ -112,7 +112,14 @@ pub async fn state_resolution<'a, StateSets>(
{
let event_fetch = |event_id| self.event_fetch(event_id);
let event_exists = |event_id| self.event_exists(event_id);
state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists)
.map_err(|e| err!(error!("State resolution failed: {e:?}")))
.await
Ok(
state_res::resolve(
room_version,
state_sets,
auth_chain_sets,
&event_fetch,
&event_exists,
)
.await?,
)
}

View File

@@ -3,7 +3,7 @@
str::FromStr,
};
use conduwuit::{Error, Result};
use conduwuit::{Err, Error, Result};
use ruma::{UInt, api::client::error::ErrorKind};
use crate::rooms::short::ShortRoomId;
@@ -57,7 +57,7 @@ fn from_str(value: &str) -> Result<Self> {
if let Some(token) = pag_tok() {
Ok(token)
} else {
Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token"))
Err!(BadRequest(ErrorKind::InvalidParam, "invalid token"))
}
}
}

View File

@@ -72,26 +72,6 @@ pub async fn append_incoming_pdu<'a, Leaves>(
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock, room_id)
.await?;
// Process admin commands for federation events
if *pdu.kind() == TimelineEventType::RoomMessage {
let content: ExtractBody = pdu.get_content()?;
if let Some(body) = content.body {
if let Some(source) = self
.services
.admin
.is_admin_command(pdu, &body, false)
.await
{
self.services.admin.command_with_sender(
body,
Some(pdu.event_id().into()),
source,
pdu.sender.clone().into(),
)?;
}
}
}
Ok(Some(pdu_id))
}
@@ -354,6 +334,15 @@ pub async fn append_pdu<'a, Leaves>(
let content: ExtractBody = pdu.get_content()?;
if let Some(body) = content.body {
self.services.search.index_pdu(shortroomid, &pdu_id, &body);
if let Some(source) = self.services.admin.is_admin_command(pdu, &body).await {
self.services.admin.command_with_sender(
body,
Some((pdu.event_id()).into()),
source,
pdu.sender.clone().into(),
)?;
}
}
},
| _ => {},

View File

@@ -18,7 +18,7 @@
},
};
use super::{ExtractBody, RoomMutexGuard};
use super::RoomMutexGuard;
/// Creates a new persisted data unit and adds it to a room. This function
/// takes a roomid_mutex_state, meaning that only this function is able to
@@ -126,26 +126,6 @@ pub async fn build_and_append_pdu(
.boxed()
.await?;
// Process admin commands for locally sent events
if *pdu.kind() == TimelineEventType::RoomMessage {
let content: ExtractBody = pdu.get_content()?;
if let Some(body) = content.body {
if let Some(source) = self
.services
.admin
.is_admin_command(&pdu, &body, true)
.await
{
self.services.admin.command_with_sender(
body,
Some(pdu.event_id().into()),
source,
pdu.sender.clone().into(),
)?;
}
}
}
// We set the room state after inserting the pdu, so that we never have a moment
// in time where events in the current room state do not exist
trace!("Setting room state for room {room_id}");
@@ -187,8 +167,6 @@ pub async fn build_and_append_pdu(
Ok(pdu.event_id().to_owned())
}
/// Assert invariants about the admin room, to prevent (for example) all admins
/// from leaving or being banned from the room
#[implement(super::Service)]
#[tracing::instrument(skip_all, level = "debug")]
async fn check_pdu_for_admin_room<Pdu>(&self, pdu: &Pdu, sender: &UserId) -> Result

View File

@@ -75,10 +75,7 @@ fn from_evt(
let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id,
))
Err!(InconsistentRoomState("non-create event for room of unknown version", room_id))
}
}
let PduBuilder {
@@ -275,7 +272,9 @@ fn from_evt(
.hash_and_sign_event(&mut pdu_json, &room_version_id)
{
return match e {
| Error::Signatures(ruma::signatures::Error::PduSize) => {
| Error::Signatures { source, .. }
if matches!(source, ruma::signatures::Error::PduSize) =>
{
Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)")))
},
| _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))),

View File

@@ -1,7 +1,7 @@
use std::{fmt::Debug, mem};
use bytes::BytesMut;
use conduwuit::{Err, Result, debug_error, err, utils, utils::response::LimitReadExt, warn};
use conduwuit::{Err, Result, debug_error, err, utils, warn};
use reqwest::Client;
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
@@ -38,7 +38,7 @@ pub(crate) async fn send_antispam_request<T>(
.expect("http::response::Builder is usable"),
);
let body = response.limit_read(65535).await?; // TODO: handle timeout
let body = response.bytes().await?; // TODO: handle timeout
if !status.is_success() {
debug_error!("Antispam response bytes: {:?}", utils::string_from_bytes(&body));

View File

@@ -1,9 +1,7 @@
use std::{fmt::Debug, mem};
use bytes::BytesMut;
use conduwuit::{
Err, Result, debug_error, err, implement, trace, utils, utils::response::LimitReadExt, warn,
};
use conduwuit::{Err, Result, debug_error, err, implement, trace, utils, warn};
use ruma::api::{
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration,
};
@@ -79,15 +77,7 @@ pub async fn send_appservice_request<T>(
.expect("http::response::Builder is usable"),
);
let body = response
.limit_read(
self.server
.config
.max_request_size
.try_into()
.expect("usize fits into u64"),
)
.await?;
let body = response.bytes().await?;
if !status.is_success() {
debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body));

View File

@@ -10,7 +10,7 @@
use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD};
use conduwuit_core::{
Error, Event, Result, at, debug, err, error,
Error, Event, Result, debug, err, error,
result::LogErr,
trace,
utils::{
@@ -175,7 +175,7 @@ async fn handle_response_ok<'a>(
if !new_events.is_empty() {
self.db.mark_as_active(new_events.iter());
let new_events_vec = new_events.into_iter().map(at!(1)).collect();
let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect();
futures.push(self.send_events(dest.clone(), new_events_vec));
} else {
statuses.remove(dest);

View File

@@ -14,7 +14,7 @@
media, moderation, presence, pusher, registration_tokens, resolver, rooms, sending,
server_keys,
service::{self, Args, Map, Service},
sync, transactions, uiaa, users,
sync, transaction_ids, uiaa, users,
};
pub struct Services {
@@ -37,7 +37,7 @@ pub struct Services {
pub sending: Arc<sending::Service>,
pub server_keys: Arc<server_keys::Service>,
pub sync: Arc<sync::Service>,
pub transactions: Arc<transactions::Service>,
pub transaction_ids: Arc<transaction_ids::Service>,
pub uiaa: Arc<uiaa::Service>,
pub users: Arc<users::Service>,
pub moderation: Arc<moderation::Service>,
@@ -110,7 +110,7 @@ macro_rules! build {
sending: build!(sending::Service),
server_keys: build!(server_keys::Service),
sync: build!(sync::Service),
transactions: build!(transactions::Service),
transaction_ids: build!(transaction_ids::Service),
uiaa: build!(uiaa::Service),
users: build!(users::Service),
moderation: build!(moderation::Service),

View File

@@ -0,0 +1,54 @@
use std::sync::Arc;
use conduwuit::{Result, implement};
use database::{Handle, Map};
use ruma::{DeviceId, TransactionId, UserId};
/// Service tracking client-supplied transaction IDs so repeated requests
/// (retries) can be detected and answered with the previously stored
/// response data instead of being re-executed.
pub struct Service {
	// Database handles owned by this service.
	db: Data,
}
/// Raw database maps used by the transaction-ID service.
struct Data {
	// Map keyed by `user_id 0xFF device_id 0xFF txn_id` (see `add_txnid`);
	// the value is the opaque response bytes recorded for that transaction.
	userdevicetxnid_response: Arc<Map>,
}
impl crate::Service for Service {
	/// Construct the service, binding the `userdevicetxnid_response` map
	/// from the database provided in `args`.
	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
		let userdevicetxnid_response = args.db["userdevicetxnid_response"].clone();
		let db = Data { userdevicetxnid_response };
		Ok(Arc::new(Self { db }))
	}

	/// Service name derived from this module's path.
	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
}
#[implement(Service)]
/// Record `data` as the stored response for a transaction identified by
/// (`user_id`, `device_id`, `txn_id`).
///
/// The database key is the three byte segments joined by a `0xFF`
/// separator: `user_id 0xFF device_id 0xFF txn_id`. A missing device id
/// contributes an empty segment, preserving the separator layout.
pub fn add_txnid(
	&self,
	user_id: &UserId,
	device_id: Option<&DeviceId>,
	txn_id: &TransactionId,
	data: &[u8],
) {
	// Empty slice stands in for "no device" so the key shape is stable.
	let device_bytes = device_id.map(DeviceId::as_bytes).unwrap_or_default();
	let key: Vec<u8> =
		[user_id.as_bytes(), device_bytes, txn_id.as_bytes()].join(&0xFF_u8);

	self.db.userdevicetxnid_response.insert(&key, data);
}
// If there's no entry, this is a new transaction
#[implement(Service)]
/// Look up the stored response for (`user_id`, `device_id`, `txn_id`).
///
/// Returns the previously recorded response handle if the transaction was
/// seen before; an `Err` result means no entry exists and the caller should
/// treat the request as a new transaction.
pub async fn existing_txnid(
	&self,
	user_id: &UserId,
	device_id: Option<&DeviceId>,
	txn_id: &TransactionId,
) -> Result<Handle<'_>> {
	self.db
		.userdevicetxnid_response
		.qry(&(user_id, device_id, txn_id))
		.await
}

Some files were not shown because too many files have changed in this diff Show More