Compare commits

...

33 Commits

Author SHA1 Message Date
Jade Ellis
aed15f246a refactor: Clean up logging issues
Primary issues: Double escapes (debug fmt), spans without levels
2026-01-05 18:28:57 +00:00
timedout
27d6604d14 fix: Use a timeout instead of deadline 2026-01-03 17:08:47 +00:00
timedout
1c7bd2f6fa style: Remove unnecessary then() calls in chain 2026-01-03 16:22:49 +00:00
timedout
56d7099011 style: Include errors in key claim response too 2026-01-03 16:10:06 +00:00
timedout
bc426e1bfc fix: Apply client-requested timeout to federated key queries
Also parallelised federation calls in related functions
2026-01-03 16:05:05 +00:00
timedout
6c61b3ec5b fix: Build error two: electric boogaloo 2025-12-31 21:15:28 +00:00
timedout
9d9d1170b6 fix: Build error 2025-12-31 21:04:06 +00:00
Jade Ellis
7be20abcad style: Fix typo 2025-12-31 20:08:53 +00:00
Jade Ellis
078275964c chore: Update precommit hooks 2025-12-31 20:08:53 +00:00
timedout
bf200ad12d fix: Resolve compile errors
me and cargo check are oops now
2025-12-31 20:01:29 +00:00
timedout
41e628892d chore: Add news fragment 2025-12-31 20:01:29 +00:00
timedout
44851ee6a2 feat: Fall back to remote room summary if local fails 2025-12-31 20:01:29 +00:00
timedout
a7e6e6e83f feat: Allow local server admins to bypass summary visibility checks
feat: Allow local server admins to bypass summary visibility checks

Also improve error messages so they aren't so damn long.
2025-12-31 20:01:29 +00:00
Ginger
8a561fcd3a chore: Clippy fixes 2025-12-31 19:56:35 +00:00
Ginger
25c305f473 chore: Fix comment formatting 2025-12-31 19:56:35 +00:00
Ginger
c900350164 chore: Add news fragment 2025-12-31 19:56:35 +00:00
Ginger
c565e6ffbc feat: Restrict where certain admin commands may be used 2025-12-31 19:56:31 +00:00
Jade Ellis
442f887c98 style: Improve warning regarding admin removal 2025-12-31 19:40:42 +00:00
Terry
03220845e5 docs: Changelog 2025-12-31 19:35:53 +00:00
Terry
f8c1e9bcde feat: Config defined admin list
Closes !1246
2025-12-31 19:35:40 +00:00
Ginger
21324b748f feat: Enable console feature by default 2025-12-31 19:12:25 +00:00
Jade Ellis
b7bf36443b docs: Fix typo 2025-12-31 19:03:22 +00:00
ginger
d72192aa32 fix(ci): Stop using nightly to build Debian packages 2025-12-30 14:23:31 -05:00
Jade Ellis
38ecc41780 chore: Release 2025-12-30 17:45:32 +00:00
Jade Ellis
7ae958bb03 docs: Announcement 2025-12-30 17:35:20 +00:00
Jade Ellis
f676fa53f1 chore: Specify the tag body template 2025-12-30 17:34:44 +00:00
Jade Ellis
978bdc6466 docs: Changelog 2025-12-30 17:34:44 +00:00
timedout
7c741e62cf fix: Forbid creators in power levels 2025-12-30 17:34:43 +00:00
Olivia Lee
12aecf8091 validate membership events returned by remote servers
This fixes a vulnerability where an attacker with a malicious remote
server and a user on the local server can trick the local server into
signing arbitrary events. The attacker issues a remote leave as the local
user to a room on the malicious server. Without any validation of the
make_leave response, the local server would sign the attacker-controlled
event and pass it back to the malicious server with send_leave.

The join and knock endpoints are also fixed in this commit, but are less
useful for exploitation because the local server replaces the "content"
field returned by the remote server. Remote invites are unaffected
because we already check that the event returned from /invite has the
same event ID as the event passed to it.

Co-authored-by: timedout <git@nexy7574.co.uk>
Co-authored-by: Jade Ellis <jade@ellis.link>
Co-authored-by: Ginger <ginger@gingershaped.computer>
2025-12-30 15:24:45 +00:00
Renovate Bot
19372f0b15 chore(deps): update dependency cargo-bins/cargo-binstall to v1.16.6 2025-12-29 23:52:04 +00:00
Jade Ellis
a66b90cb3d ci: Explicitly auto tag latest 2025-12-29 23:45:02 +00:00
Jade Ellis
7234ce6cbe ci: Don't force tag all versions as latest 2025-12-29 23:45:02 +00:00
Jade Ellis
beb0c2ad9a fix(ci): Don't double append latest tag suffix 2025-12-29 23:45:02 +00:00
99 changed files with 780 additions and 455 deletions

View File

@@ -64,6 +64,7 @@ runs:
uses: docker/metadata-action@v5
with:
flavor: |
latest=auto
suffix=${{ inputs.tag_suffix }},onlatest=true
tags: |
type=semver,pattern={{version}},prefix=v
@@ -72,7 +73,6 @@ runs:
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},
type=ref,event=pr
type=sha,format=short
type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }},priority=1100
images: ${{ inputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:

View File

@@ -59,10 +59,9 @@ jobs:
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Setup Rust nightly
- name: Setup Rust
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Get package version and component

View File

@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.40.0
rev: v1.41.0
hooks:
- id: typos
- id: typos
@@ -31,7 +31,7 @@ repos:
stages: [commit-msg]
- repo: https://github.com/crate-ci/committed
rev: v1.1.8
rev: v1.1.9
hooks:
- id: committed

View File

@@ -24,3 +24,4 @@ extend-ignore-re = [
"continuwuity" = "continuwuity"
"continuwity" = "continuwuity"
"execuse" = "execuse"
"oltp" = "OTLP"

12
CHANGELOG.md Normal file
View File

@@ -0,0 +1,12 @@
# Continuwuity 0.5.0 (2025-12-30)
**This release contains a CRITICAL vulnerability patch, and you must update as soon as possible**
## Features
- Enabled the OTLP exporter in default builds, and allow configuring the exporter protocol. (@Jade). (#1251)
## Bug Fixes
- Don't allow admin room upgrades, as this can break the admin room (@timedout) (#1245)
- Fix invalid creators in power levels during upgrade to v12 (@timedout) (#1245)

38
Cargo.lock generated
View File

@@ -940,7 +940,7 @@ dependencies = [
[[package]]
name = "conduwuit"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"clap",
"conduwuit_admin",
@@ -972,7 +972,7 @@ dependencies = [
[[package]]
name = "conduwuit_admin"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"clap",
"conduwuit_api",
@@ -994,7 +994,7 @@ dependencies = [
[[package]]
name = "conduwuit_api"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"async-trait",
"axum 0.7.9",
@@ -1027,14 +1027,14 @@ dependencies = [
[[package]]
name = "conduwuit_build_metadata"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"built",
]
[[package]]
name = "conduwuit_core"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"argon2",
"arrayvec",
@@ -1095,7 +1095,7 @@ dependencies = [
[[package]]
name = "conduwuit_database"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"async-channel",
"conduwuit_core",
@@ -1114,7 +1114,7 @@ dependencies = [
[[package]]
name = "conduwuit_macros"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"itertools 0.14.0",
"proc-macro2",
@@ -1124,7 +1124,7 @@ dependencies = [
[[package]]
name = "conduwuit_router"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"axum 0.7.9",
"axum-client-ip",
@@ -1159,7 +1159,7 @@ dependencies = [
[[package]]
name = "conduwuit_service"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -1200,7 +1200,7 @@ dependencies = [
[[package]]
name = "conduwuit_web"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"askama",
"axum 0.7.9",
@@ -1750,7 +1750,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
dependencies = [
"libc",
"windows-sys 0.52.0",
"windows-sys 0.61.2",
]
[[package]]
@@ -2405,7 +2405,7 @@ dependencies = [
"libc",
"percent-encoding",
"pin-project-lite",
"socket2 0.5.10",
"socket2 0.6.1",
"tokio",
"tower-service",
"tracing",
@@ -3121,7 +3121,7 @@ version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
@@ -3749,7 +3749,7 @@ dependencies = [
"quinn-udp",
"rustc-hash",
"rustls",
"socket2 0.5.10",
"socket2 0.6.1",
"thiserror 2.0.17",
"tokio",
"tracing",
@@ -3786,9 +3786,9 @@ dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.5.10",
"socket2 0.6.1",
"tracing",
"windows-sys 0.52.0",
"windows-sys 0.60.2",
]
[[package]]
@@ -4325,7 +4325,7 @@ dependencies = [
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.52.0",
"windows-sys 0.61.2",
]
[[package]]
@@ -6206,7 +6206,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"clap",
"serde",
@@ -6215,7 +6215,7 @@ dependencies = [
[[package]]
name = "xtask-generate-commands"
version = "0.5.0"
version = "0.5.1"
dependencies = [
"clap-markdown",
"clap_builder",

View File

@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0"
version = "0.5.1"
[workspace.metadata.crane]
name = "conduwuit"

View File

@@ -0,0 +1 @@
The `console` feature is now enabled by default, allowing the server console to be used for running admin commands directly.

View File

@@ -0,0 +1 @@
Certain potentially dangerous admin commands are now restricted to only be usable in the admin room and server console.

View File

@@ -1 +0,0 @@
Enabled the OLTP exporter in default builds, and allow configuring the exporter protocol. (@Jade).

1
changelog.d/1253.feature Normal file
View File

@@ -0,0 +1 @@
Implemented a configuration defined admin list independent of the admin room. (@Terryiscool160).

1
changelog.d/1257.bugfix Normal file
View File

@@ -0,0 +1 @@
Fixed unreliable room summary fetching and improved error messages. Contributed by @nex.

2
changelog.d/1261.bugfix Normal file
View File

@@ -0,0 +1,2 @@
Client-requested timeout parameter is now applied to e2ee key lookups and claims. Related federation requests are now
also concurrent. Contributed by @nex.

View File

@@ -1590,6 +1590,18 @@
#
#admin_room_tag = "m.server_notice"
# A list of Matrix IDs that are qualified as server admins.
#
# Any Matrix IDs within this list are regarded as an admin
# regardless of whether they are in the admin room or not
#
#admins_list = []
# Defines whether those within the admin room are added to the
# admins_list.
#
#admins_from_room = true
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
# This is NOT enabled by default.
#

View File

@@ -48,7 +48,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.2
ENV BINSTALL_VERSION=1.16.6
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.2
ENV BINSTALL_VERSION=1.16.6
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -128,7 +128,7 @@ ### Log Levels
```rs
// Good
error!(
error = %err,
error = ?err,
room_id = %room_id,
"Failed to send event to room"
);
@@ -264,7 +264,7 @@ ### Code Comments
warn!(
destination = %destination,
attempt = attempt,
error = %err,
error = ?err,
retry_delay_ms = retry_delay.as_millis(),
"Federation request failed, retrying"
);

View File

@@ -6,10 +6,10 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 6,
"id": 7,
"mention_room": true,
"date": "2025-12-22",
"message": "Continuwuity v0.5.0 has been released. **The release contains a fix for the critical vulnerability [GHSA-22fw-4jq7-g8r8](https://github.com/continuwuity/continuwuity/security/advisories/GHSA-22fw-4jq7-g8r8). Update as soon as possible.**\n\nThis has been *actively exploited* to create fake leave events in the Continuwuity rooms. Please leave and rejoin the rooms to fix any issues this may have caused. \n\n - [Continuwuity (space)](https://matrix.to/#/!PxtzompFuodlyzdCDtV5lzjXs10XIHeOOaq_FYodHyk?via=ellis.link&via=gingershaped.computer&via=continuwuity.org)\n - [Continuwuity](https://matrix.to/#/!kn3VQSLcgWGUFm0FFRid4MinJ_aeZPjHQ0irXbHa3bU?via=ellis.link&via=gingershaped.computer&via=continuwuity.org)\n - [Continuwuity Announcements](https://matrix.to/#/!d7zDZg1Vu5nhkCi50jNfOIObD5fpfGhfl48SZWZek7k?via=ellis.link)\n - [Continuwuity Offtopic](https://matrix.to/#/!QlOomq-suHC9rJHfDFVdbcGg4HS2ojSQ0bo4W2JOGMM?via=ellis.link&via=gingershaped.computer&via=continuwuity.org)\n - [Continuwuity Development](https://matrix.to/#/!aAvealFbgiKTJGzumNbjuwDgt1tOkBKwiyfYqE3ouk0?via=ellis.link&via=explodie.org&via=continuwuity.org)\n"
"date": "2025-12-30",
"message": "Continuwuity v0.5.1 has been released. **The release contains a fix for the critical vulnerability [GHSA-m5p2-vccg-8c9v](https://github.com/continuwuity/continuwuity/security/advisories/GHSA-m5p2-vccg-8c9v) (embargoed) affecting all Conduit-derived servers. Update as soon as possible.**\n\nThis has been *actively exploited* to attempt account takeover and forge events bricking the Continuwuity rooms. The new space is accessible at [Continuwuity (room list)](https://matrix.to/#/!8cR4g-i9ucof69E4JHNg9LbPVkGprHb3SzcrGBDDJgk?via=continuwuity.org&via=starstruck.systems&via=gingershaped.computer)\n"
}
]
}

1
release.toml Normal file
View File

@@ -0,0 +1 @@
tag-message = "chore: Release v{{version}}"

View File

@@ -48,19 +48,31 @@ pub enum AdminCommand {
Query(QueryCommand),
}
#[tracing::instrument(skip_all, name = "command")]
#[tracing::instrument(skip_all, name = "command", level = "info")]
pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result {
use AdminCommand::*;
match command {
| Appservices(command) => appservice::process(command, context).await,
| Appservices(command) => {
// appservice commands are all restricted
context.bail_restricted()?;
appservice::process(command, context).await
},
| Media(command) => media::process(command, context).await,
| Users(command) => user::process(command, context).await,
| Users(command) => {
// user commands are all restricted
context.bail_restricted()?;
user::process(command, context).await
},
| Rooms(command) => room::process(command, context).await,
| Federation(command) => federation::process(command, context).await,
| Server(command) => server::process(command, context).await,
| Debug(command) => debug::process(command, context).await,
| Query(command) => query::process(command, context).await,
| Query(command) => {
// query commands are all restricted
context.bail_restricted()?;
query::process(command, context).await
},
| Check(command) => check::process(command, context).await,
}
}

View File

@@ -1,6 +1,6 @@
use std::{fmt, time::SystemTime};
use conduwuit::Result;
use conduwuit::{Err, Result};
use conduwuit_service::Services;
use futures::{
Future, FutureExt, TryFutureExt,
@@ -8,6 +8,7 @@
lock::Mutex,
};
use ruma::{EventId, UserId};
use service::admin::InvocationSource;
pub(crate) struct Context<'a> {
pub(crate) services: &'a Services,
@@ -16,6 +17,7 @@ pub(crate) struct Context<'a> {
pub(crate) reply_id: Option<&'a EventId>,
pub(crate) sender: Option<&'a UserId>,
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
pub(crate) source: InvocationSource,
}
impl Context<'_> {
@@ -43,4 +45,22 @@ pub(crate) fn sender_or_service_user(&self) -> &UserId {
self.sender
.unwrap_or_else(|| self.services.globals.server_user.as_ref())
}
/// Returns an Err if the [`Self::source`] of this context does not allow
/// restricted commands to be executed.
///
/// This is intended to be placed at the start of restricted commands'
/// implementations, like so:
///
/// ```ignore
/// self.bail_restricted()?;
/// // actual command impl
/// ```
pub(crate) fn bail_restricted(&self) -> Result {
if self.source.allows_restricted() {
Ok(())
} else {
Err!("This command can only be used in the admin room.")
}
}
}

View File

@@ -291,6 +291,8 @@ pub(super) async fn get_remote_pdu(
#[admin_command]
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
self.bail_restricted()?;
let room_id = self.services.rooms.alias.resolve(&room).await?;
let room_state: Vec<Raw<AnyStateEvent>> = self
.services
@@ -417,27 +419,6 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
Err!("No log level was specified.")
}
#[admin_command]
pub(super) async fn sign_json(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Err!("Expected code block in command body. Add --help for details.");
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json: {e}"),
| Ok(mut value) => {
self.services.server_keys.sign_json(&mut value)?;
let json_text = serde_json::to_string_pretty(&value)?;
write!(self, "{json_text}")
},
}
.await
}
#[admin_command]
pub(super) async fn verify_json(&self) -> Result {
if self.body.len() < 2
@@ -475,8 +456,10 @@ pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
}
#[admin_command]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "info")]
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms
@@ -500,8 +483,10 @@ pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
}
#[admin_command]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "info")]
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms
@@ -525,13 +510,15 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
}
#[admin_command]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "info")]
pub(super) async fn force_set_room_state_from_server(
&self,
room_id: OwnedRoomId,
server_name: OwnedServerName,
at_event: Option<OwnedEventId>,
) -> Result {
self.bail_restricted()?;
if !self
.services
.rooms

View File

@@ -47,9 +47,9 @@ pub enum DebugCommand {
shorteventid: ShortEventId,
},
/// - Attempts to retrieve a PDU from a remote server. Inserts it into our
/// database/timeline if found and we do not have this PDU already
/// (following normal event auth rules, handles it as an incoming PDU).
/// - Attempts to retrieve a PDU from a remote server. **Does not** insert
/// it into the database
/// or persist it anywhere.
GetRemotePdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: OwnedEventId,
@@ -125,12 +125,6 @@ pub enum DebugCommand {
reset: bool,
},
/// - Sign JSON blob
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
SignJson,
/// - Verify JSON signatures
///
/// This command needs a JSON blob provided in a Markdown code block below

View File

@@ -8,12 +8,14 @@
#[admin_command]
pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
self.services.rooms.metadata.disable_room(&room_id, true);
self.write_str("Room disabled.").await
}
#[admin_command]
pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result {
self.bail_restricted()?;
self.services.rooms.metadata.disable_room(&room_id, false);
self.write_str("Room enabled.").await
}

View File

@@ -16,6 +16,8 @@ pub(super) async fn delete(
mxc: Option<OwnedMxcUri>,
event_id: Option<OwnedEventId>,
) -> Result {
self.bail_restricted()?;
if event_id.is_some() && mxc.is_some() {
return Err!("Please specify either an MXC or an event ID, not both.",);
}
@@ -176,6 +178,8 @@ pub(super) async fn delete(
#[admin_command]
pub(super) async fn delete_list(&self) -> Result {
self.bail_restricted()?;
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
@@ -231,6 +235,8 @@ pub(super) async fn delete_past_remote_media(
after: bool,
yes_i_want_to_delete_local_media: bool,
) -> Result {
self.bail_restricted()?;
if before && after {
return Err!("Please only pick one argument, --before or --after.",);
}
@@ -273,6 +279,8 @@ pub(super) async fn delete_all_from_server(
server_name: OwnedServerName,
yes_i_want_to_delete_local_media: bool,
) -> Result {
self.bail_restricted()?;
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
return Err!("This command only works for remote media by default.",);
}

View File

@@ -37,7 +37,7 @@ pub(super) fn dispatch(services: Arc<Services>, command: CommandInput) -> Proces
Box::pin(handle_command(services, command))
}
#[tracing::instrument(skip_all, name = "admin")]
#[tracing::instrument(skip_all, name = "admin", level = "info")]
async fn handle_command(services: Arc<Services>, command: CommandInput) -> ProcessorResult {
AssertUnwindSafe(Box::pin(process_command(services, &command)))
.catch_unwind()
@@ -59,6 +59,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
reply_id: input.reply_id.as_deref(),
sender: input.sender.as_deref(),
output: BufWriter::new(Vec::new()).into(),
source: input.source,
};
let (result, mut logs) = process(&context, command, &args).await;

View File

@@ -98,7 +98,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
{
| Ok((room_id, servers)) => {
debug!(
?room_id,
%room_id,
?servers,
"Got federation response fetching room ID for room {room}"
);
@@ -240,7 +240,7 @@ async fn ban_list_of_rooms(&self) -> Result {
{
| Ok((room_id, servers)) => {
debug!(
?room_id,
%room_id,
?servers,
"Got federation response fetching room ID for \
{room}",
@@ -397,7 +397,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
{
| Ok((room_id, servers)) => {
debug!(
?room_id,
%room_id,
?servers,
"Got federation response fetching room ID for room {room}"
);

View File

@@ -24,6 +24,8 @@ pub(super) async fn uptime(&self) -> Result {
#[admin_command]
pub(super) async fn show_config(&self) -> Result {
self.bail_restricted()?;
self.write_str(&format!("{}", *self.services.server.config))
.await
}
@@ -118,6 +120,8 @@ pub(super) async fn list_backups(&self) -> Result {
#[admin_command]
pub(super) async fn backup_database(&self) -> Result {
self.bail_restricted()?;
let db = Arc::clone(&self.services.db);
let result = self
.services
@@ -144,6 +148,8 @@ pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result {
#[admin_command]
pub(super) async fn reload_mods(&self) -> Result {
self.bail_restricted()?;
self.services.server.reload()?;
self.write_str("Reloading server...").await
@@ -168,6 +174,8 @@ pub(super) async fn restart(&self, force: bool) -> Result {
#[admin_command]
pub(super) async fn shutdown(&self) -> Result {
self.bail_restricted()?;
warn!("shutdown command");
self.services.server.shutdown()?;

View File

@@ -461,9 +461,11 @@ pub(super) async fn force_join_list_of_local_users(
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Err!("There is not an admin room to check for server admins.",);
};
let server_admins = self.services.admin.get_admins().await;
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
}
let (room_id, servers) = self
.services
@@ -482,15 +484,6 @@ pub(super) async fn force_join_list_of_local_users(
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
.services
.rooms
.state_cache
.active_local_users_in_room(&admin_room)
.map(ToOwned::to_owned)
.collect()
.await;
if !self
.services
.rooms
@@ -583,9 +576,11 @@ pub(super) async fn force_join_all_local_users(
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Err!("There is not an admin room to check for server admins.",);
};
let server_admins = self.services.admin.get_admins().await;
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
}
let (room_id, servers) = self
.services
@@ -604,15 +599,6 @@ pub(super) async fn force_join_all_local_users(
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
.services
.rooms
.state_cache
.active_local_users_in_room(&admin_room)
.map(ToOwned::to_owned)
.collect()
.await;
if !self
.services
.rooms

View File

@@ -49,7 +49,7 @@
///
/// Note: This will not reserve the username, so the username might become
/// invalid when trying to register
#[tracing::instrument(skip_all, fields(%client), name = "register_available")]
#[tracing::instrument(skip_all, fields(%client), name = "register_available", level = "info")]
pub(crate) async fn get_register_available_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -138,7 +138,7 @@ pub(crate) async fn get_register_available_route(
/// - If `inhibit_login` is false: Creates a device and returns device id and
/// access_token
#[allow(clippy::doc_markdown)]
#[tracing::instrument(skip_all, fields(%client), name = "register")]
#[tracing::instrument(skip_all, fields(%client), name = "register", level = "info")]
pub(crate) async fn register_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -603,7 +603,7 @@ pub(crate) async fn register_route(
/// last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[tracing::instrument(skip_all, fields(%client), name = "change_password")]
#[tracing::instrument(skip_all, fields(%client), name = "change_password", level = "info")]
pub(crate) async fn change_password_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -727,7 +727,7 @@ pub(crate) async fn whoami_route(
/// - Forgets all to-device events
/// - Triggers device list updates
/// - Removes ability to log in again
#[tracing::instrument(skip_all, fields(%client), name = "deactivate")]
#[tracing::instrument(skip_all, fields(%client), name = "deactivate", level = "info")]
pub(crate) async fn deactivate_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -102,7 +102,7 @@ pub(crate) async fn get_alias_route(
};
let servers = room_available_servers(&services, &room_id, &room_alias, servers).await;
debug!(?room_alias, ?room_id, "available servers: {servers:?}");
debug!(%room_alias, %room_id, "available servers: {servers:?}");
Ok(get_alias::v3::Response::new(room_id, servers))
}

View File

@@ -74,7 +74,7 @@ pub(crate) async fn get_context_route(
}
if !visible {
debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404");
debug_warn!(req_evt = %event_id, ?base_id, %room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404");
return Err!(Request(NotFound("Event not found.")));
}

View File

@@ -49,7 +49,7 @@ pub(crate) async fn get_device_route(
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
///
/// Updates the metadata on a given device of the sender user.
#[tracing::instrument(skip_all, fields(%client), name = "update_device")]
#[tracing::instrument(skip_all, fields(%client), name = "update_device", level = "debug")]
pub(crate) async fn update_device_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -44,7 +44,7 @@
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")]
#[tracing::instrument(skip_all, fields(%client), name = "publicrooms", level = "info")]
pub(crate) async fn get_public_rooms_filtered_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -80,7 +80,7 @@ pub(crate) async fn get_public_rooms_filtered_route(
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")]
#[tracing::instrument(skip_all, fields(%client), name = "publicrooms", level = "info")]
pub(crate) async fn get_public_rooms_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -116,7 +116,7 @@ pub(crate) async fn get_public_rooms_route(
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
///
/// Sets the visibility of a given room in the room directory.
#[tracing::instrument(skip_all, fields(%client), name = "room_directory")]
#[tracing::instrument(skip_all, fields(%client), name = "room_directory", level = "info")]
pub(crate) async fn set_room_visibility_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -1,7 +1,15 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::{
collections::{BTreeMap, HashMap, HashSet},
time::Duration,
};
use axum::extract::State;
use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils};
use conduwuit::{
Err, Error, Result, debug, debug_warn, err,
result::NotFound,
utils,
utils::{IterStream, stream::WidebandExt},
};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
@@ -44,7 +52,7 @@ pub(crate) async fn upload_keys_route(
.deserialize()
.inspect_err(|e| {
debug_warn!(
?key_id,
%key_id,
?one_time_key,
"Invalid one time key JSON submitted by client, skipping: {e}"
);
@@ -86,8 +94,8 @@ pub(crate) async fn upload_keys_route(
{
if existing_keys.json().get() == device_keys.json().get() {
debug!(
?sender_user,
?sender_device,
%sender_user,
%sender_device,
?device_keys,
"Ignoring user uploaded keys as they are an exact copy already in the \
database"
@@ -134,6 +142,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| u == sender_user,
true, // Always allow local users to see device names of other local users
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
@@ -145,7 +154,12 @@ pub(crate) async fn claim_keys_route(
State(services): State<crate::State>,
body: Ruma<claim_keys::v3::Request>,
) -> Result<claim_keys::v3::Response> {
claim_keys_helper(&services, &body.one_time_keys).await
claim_keys_helper(
&services,
&body.one_time_keys,
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
@@ -324,7 +338,7 @@ pub(crate) async fn upload_signatures_route(
for (user_id, keys) in &body.signed_keys {
for (key_id, key) in keys {
let Ok(key) = serde_json::to_value(key)
.inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}"))
.inspect_err(|e| debug_warn!(%key_id, "Invalid \"key\" JSON: {e}"))
else {
continue;
};
@@ -421,6 +435,7 @@ pub(crate) async fn get_keys_helper<F>(
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
allowed_signatures: F,
include_display_names: bool,
timeout: Duration,
) -> Result<get_keys::v3::Response>
where
F: Fn(&UserId) -> bool + Send + Sync,
@@ -512,9 +527,10 @@ pub(crate) async fn get_keys_helper<F>(
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
@@ -522,17 +538,22 @@ pub(crate) async fn get_keys_helper<F>(
let request =
federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
let response = tokio::time::timeout(
timeout,
services.sending.send_federation_request(server, request),
)
.await
// Need to flatten the Result<Result<V, E>, E> into Result<V, E>
.map_err(|_| err!(Request(Unknown("Timeout when getting keys over federation."))))
.and_then(|res| res);
let response = services
.sending
.send_federation_request(server, request)
.await;
(server, response)
Some((server, response))
})
.collect();
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
for (server, response) in futures {
match response {
| Ok(response) => {
for (user, master_key) in response.master_keys {
@@ -564,8 +585,8 @@ pub(crate) async fn get_keys_helper<F>(
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
},
| _ => {
failures.insert(server.to_string(), json!({}));
| Err(e) => {
failures.insert(server.to_string(), json!({ "error": e.to_string() }));
},
}
}
@@ -608,6 +629,7 @@ fn add_unsigned_device_display_name(
pub(crate) async fn claim_keys_helper(
services: &Services,
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, OneTimeKeyAlgorithm>>,
timeout: Duration,
) -> Result<claim_keys::v3::Response> {
let mut one_time_keys = BTreeMap::new();
@@ -638,32 +660,39 @@ pub(crate) async fn claim_keys_helper(
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut one_time_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
}
(
server,
services
.sending
.send_federation_request(server, federation::keys::claim_keys::v1::Request {
let response = tokio::time::timeout(
timeout,
services.sending.send_federation_request(
server,
federation::keys::claim_keys::v1::Request {
one_time_keys: one_time_keys_input_fed,
})
.await,
},
),
)
.await
.map_err(|_| err!(Request(Unknown("Timeout when claiming keys over federation."))))
.and_then(|res| res);
Some((server, response))
})
.collect();
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
for (server, response) in futures {
match response {
| Ok(keys) => {
one_time_keys.extend(keys.one_time_keys);
},
| Err(_e) => {
failures.insert(server.to_string(), json!({}));
| Err(e) => {
failures.insert(server.to_string(), json!({"error": e.to_string()}));
},
}
}

View File

@@ -21,7 +21,7 @@
/// # `POST /_matrix/client/r0/rooms/{roomId}/invite`
///
/// Tries to send an invite event into the room.
#[tracing::instrument(skip_all, fields(%client), name = "invite")]
#[tracing::instrument(skip_all, fields(%client), name = "invite", level = "info")]
pub(crate) async fn invite_user_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -48,7 +48,7 @@
},
};
use super::banned_room_check;
use super::{banned_room_check, validate_remote_member_event_stub};
use crate::Ruma;
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
@@ -59,7 +59,7 @@
/// rules locally
/// - If the server does not know about the room: asks other servers over
/// federation
#[tracing::instrument(skip_all, fields(%client), name = "join")]
#[tracing::instrument(skip_all, fields(%client), name = "join", level = "info")]
pub(crate) async fn join_room_by_id_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -131,7 +131,7 @@ pub(crate) async fn join_room_by_id_route(
/// - If the server does not know about the room: use the server name query
/// param if specified. if not specified, asks other servers over federation
/// via room alias server name and room ID server name
#[tracing::instrument(skip_all, fields(%client), name = "join")]
#[tracing::instrument(skip_all, fields(%client), name = "join", level = "info")]
pub(crate) async fn join_room_by_id_or_alias_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -351,7 +351,7 @@ pub async fn join_room_by_id_helper(
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")]
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote", level = "info")]
async fn join_room_by_id_helper_remote(
services: &Services,
sender_user: &UserId,
@@ -709,7 +709,7 @@ async fn join_room_by_id_helper_remote(
Ok(())
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")]
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local", level = "info")]
async fn join_room_by_id_helper_local(
services: &Services,
sender_user: &UserId,
@@ -837,6 +837,13 @@ async fn join_room_by_id_helper_local(
err!(BadServerResponse("Invalid make_join event json received from server: {e:?}"))
})?;
validate_remote_member_event_stub(
&MembershipState::Join,
sender_user,
room_id,
&join_event_stub,
)?;
let join_authorized_via_users_server = join_event_stub
.get("content")
.map(|s| {

View File

@@ -38,13 +38,13 @@
},
};
use super::{banned_room_check, join::join_room_by_id_helper};
use super::{banned_room_check, join::join_room_by_id_helper, validate_remote_member_event_stub};
use crate::Ruma;
/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}`
///
/// Tries to knock the room to ask permission to join for the sender user.
#[tracing::instrument(skip_all, fields(%client), name = "knock")]
#[tracing::instrument(skip_all, fields(%client), name = "knock", level = "info")]
pub(crate) async fn knock_room_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -408,6 +408,13 @@ async fn knock_room_helper_local(
err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}"))
})?;
validate_remote_member_event_stub(
&MembershipState::Knock,
sender_user,
room_id,
&knock_event_stub,
)?;
knock_event_stub.insert(
"origin".to_owned(),
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),

View File

@@ -21,6 +21,7 @@
};
use service::Services;
use super::validate_remote_member_event_stub;
use crate::Ruma;
/// # `POST /_matrix/client/v3/rooms/{roomId}/leave`
@@ -324,6 +325,13 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
)))
})?;
validate_remote_member_event_stub(
&MembershipState::Leave,
user_id,
room_id,
&leave_event_stub,
)?;
// TODO: Is origin needed?
leave_event_stub.insert(
"origin".to_owned(),

View File

@@ -13,7 +13,14 @@
use axum::extract::State;
use conduwuit::{Err, Result, warn};
use futures::{FutureExt, StreamExt};
use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms};
use ruma::{
CanonicalJsonObject, OwnedRoomId, RoomId, ServerName, UserId,
api::client::membership::joined_rooms,
events::{
StaticEventContent,
room::member::{MembershipState, RoomMemberEventContent},
},
};
use service::Services;
pub(crate) use self::{
@@ -56,7 +63,7 @@ pub(crate) async fn joined_rooms_route(
///
/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is
/// enabled
#[tracing::instrument(skip(services))]
#[tracing::instrument(skip(services), level = "info")]
pub(crate) async fn banned_room_check(
services: &Services,
user_id: &UserId,
@@ -153,3 +160,80 @@ pub(crate) async fn banned_room_check(
Ok(())
}
/// Validates that an event returned from a remote server by `/make_*`
/// actually is a membership event with the expected fields.
///
/// Without checking this, the remote server could use the remote membership
/// mechanism to trick our server into signing arbitrary malicious events.
pub(crate) fn validate_remote_member_event_stub(
membership: &MembershipState,
user_id: &UserId,
room_id: &RoomId,
event_stub: &CanonicalJsonObject,
) -> Result<()> {
let Some(event_type) = event_stub.get("type") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing type field"
));
};
if event_type != &RoomMemberEventContent::TYPE {
return Err!(BadServerResponse(
"Remote server returned member event with invalid event type"
));
}
let Some(sender) = event_stub.get("sender") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing sender field"
));
};
if sender != &user_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect sender"
));
}
let Some(state_key) = event_stub.get("state_key") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing state_key field"
));
};
if state_key != &user_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect state_key"
));
}
let Some(event_room_id) = event_stub.get("room_id") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing room_id field"
));
};
if event_room_id != &room_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect room_id"
));
}
let Some(content) = event_stub
.get("content")
.and_then(|content| content.as_object())
else {
return Err!(BadServerResponse(
"Remote server returned member event with missing content field"
));
};
let Some(event_membership) = content.get("membership") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing membership field"
));
};
	if event_membership != &membership.as_str() {
		return Err!(BadServerResponse(
			"Remote server returned member event with incorrect membership"
		));
	}
Ok(())
}

View File

@@ -115,7 +115,7 @@ async fn paginate_relations_with_filter(
.user_can_see_event(sender_user, room_id, target)
.await
{
debug_warn!(req_evt = ?target, ?room_id, "Event relations requested by {sender_user} but is not allowed to see it, returning 404");
debug_warn!(req_evt = %target, %room_id, "Event relations requested by {sender_user} but is not allowed to see it, returning 404");
return Err!(Request(NotFound("Event not found.")));
}

View File

@@ -29,7 +29,7 @@ struct Report {
/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
///
/// Reports an abusive room to homeserver admins
#[tracing::instrument(skip_all, fields(%client), name = "report_room")]
#[tracing::instrument(skip_all, fields(%client), name = "report_room", level = "info")]
pub(crate) async fn report_room_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -85,7 +85,7 @@ pub(crate) async fn report_room_route(
/// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`
///
/// Reports an inappropriate event to homeserver admins
#[tracing::instrument(skip_all, fields(%client), name = "report_event")]
#[tracing::instrument(skip_all, fields(%client), name = "report_event", level = "info")]
pub(crate) async fn report_event_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -133,7 +133,7 @@ pub(crate) async fn report_event_route(
Ok(report_content::v3::Response {})
}
#[tracing::instrument(skip_all, fields(%client), name = "report_user")]
#[tracing::instrument(skip_all, fields(%client), name = "report_user", level = "info")]
pub(crate) async fn report_user_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -492,7 +492,7 @@ pub(crate) async fn create_room_route(
.boxed()
.await
{
warn!(%e, "Failed to send invite");
warn!(?e, "Failed to send invite");
}
}
@@ -627,7 +627,7 @@ async fn room_alias_check(
.map_err(|e| {
err!(Request(InvalidParam(debug_error!(
?e,
?room_alias_name,
%room_alias_name,
"Failed to parse room alias.",
))))
})?;
@@ -711,7 +711,7 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<Own
}
})
.inspect(|full_room_id| {
debug_info!(?full_room_id, "Full custom room ID");
debug_info!(%full_room_id, "Full custom room ID");
})
.inspect_err(|e| warn!(?e, ?custom_room_id, "Failed to create room with custom room ID",))
.inspect_err(|e| warn!(?e, %custom_room_id, "Failed to create room with custom room ID",))
}

View File

@@ -1,11 +1,11 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Result, debug_warn, trace,
Err, Result, debug, debug_warn, info, trace,
utils::{IterStream, future::TryExtExt},
};
use futures::{
FutureExt, StreamExt,
FutureExt, StreamExt, TryFutureExt,
future::{OptionFuture, join3},
stream::FuturesUnordered,
};
@@ -46,7 +46,7 @@ pub(crate) async fn get_room_summary_legacy(
/// # `GET /_matrix/client/v1/room_summary/{roomIdOrAlias}`
///
/// Returns a short description of the state of a room.
#[tracing::instrument(skip_all, fields(%client), name = "room_summary")]
#[tracing::instrument(skip_all, fields(%client), name = "room_summary", level = "info")]
pub(crate) async fn get_room_summary(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -79,9 +79,15 @@ async fn room_summary_response(
.server_in_room(services.globals.server_name(), room_id)
.await
{
return local_room_summary_response(services, room_id, sender_user)
match local_room_summary_response(services, room_id, sender_user)
.boxed()
.await;
.await
{
| Ok(response) => return Ok(response),
| Err(e) => {
debug_warn!("Failed to get local room summary: {e:?}, falling back to remote");
},
}
}
let room =
@@ -110,27 +116,31 @@ async fn local_room_summary_response(
room_id: &RoomId,
sender_user: Option<&UserId>,
) -> Result<get_summary::msc3266::Response> {
trace!(?sender_user, "Sending local room summary response for {room_id:?}");
let join_rule = services.rooms.state_accessor.get_join_rules(room_id);
let world_readable = services.rooms.state_accessor.is_world_readable(room_id);
let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id);
let (join_rule, world_readable, guest_can_join) =
join3(join_rule, world_readable, guest_can_join).await;
trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
user_can_see_summary(
services,
room_id,
&join_rule.clone().into(),
guest_can_join,
world_readable,
join_rule.allowed_rooms(),
sender_user,
trace!(
sender_user = sender_user.map(tracing::field::display),
"Sending local room summary response for {room_id:?}"
);
let (join_rule, world_readable, guest_can_join) = join3(
services.rooms.state_accessor.get_join_rules(room_id),
services.rooms.state_accessor.is_world_readable(room_id),
services.rooms.state_accessor.guest_can_join(room_id),
)
.await?;
.await;
// Synapse allows server admins to bypass visibility checks.
// That seems neat so we'll copy that behaviour.
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
user_can_see_summary(
services,
room_id,
&join_rule.clone().into(),
guest_can_join,
world_readable,
join_rule.allowed_rooms(),
sender_user,
)
.await?;
}
let canonical_alias = services
.rooms
@@ -221,7 +231,7 @@ async fn remote_room_summary_hierarchy_response(
servers: &[OwnedServerName],
sender_user: Option<&UserId>,
) -> Result<SpaceHierarchyParentSummary> {
trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}");
trace!(sender_user = ?sender_user.map(tracing::field::display), ?servers, "Sending remote room summary response for {room_id:?}");
if !services.config.allow_federation {
return Err!(Request(Forbidden("Federation is disabled.")));
}
@@ -231,15 +241,27 @@ async fn remote_room_summary_hierarchy_response(
"Federaton of room {room_id} is currently disabled on this server."
)));
}
if servers.is_empty() {
return Err!(Request(MissingParam(
"No servers were provided to fetch the room over federation"
)));
}
let request = get_hierarchy::v1::Request::new(room_id.to_owned());
let mut requests: FuturesUnordered<_> = servers
.iter()
.map(|server| {
info!("Fetching room summary for {room_id} from server {server}");
services
.sending
.send_federation_request(server, request.clone())
.inspect_ok(move |v| {
debug!("Fetched room summary for {room_id} from server {server}: {v:?}");
})
.inspect_err(move |e| {
info!("Failed to fetch room summary for {room_id} from server {server}: {e}");
})
})
.collect();
@@ -255,23 +277,23 @@ async fn remote_room_summary_hierarchy_response(
continue;
}
return user_can_see_summary(
services,
room_id,
&room.join_rule,
room.guest_can_join,
room.world_readable,
room.allowed_room_ids.iter().map(AsRef::as_ref),
sender_user,
)
.await
.map(|()| room);
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
return user_can_see_summary(
services,
room_id,
&room.join_rule,
room.guest_can_join,
room.world_readable,
room.allowed_room_ids.iter().map(AsRef::as_ref),
sender_user,
)
.await
.map(|()| room);
}
return Ok(room);
}
Err!(Request(NotFound(
"Room is unknown to this server and was unable to fetch over federation with the \
provided servers available"
)))
Err!(Request(NotFound("Room not found or is not accessible")))
}
async fn user_can_see_summary<'a, I>(
@@ -311,21 +333,14 @@ async fn user_can_see_summary<'a, I>(
return Ok(());
}
Err!(Request(Forbidden(
"Room is not world readable, not publicly accessible/joinable, restricted room \
conditions not met, and guest access is forbidden. Not allowed to see details \
of this room."
)))
Err!(Request(Forbidden("Room is not accessible")))
},
| None => {
if is_public_room || world_readable {
return Ok(());
}
Err!(Request(Forbidden(
"Room is not world readable or publicly accessible/joinable, authentication is \
required"
)))
Err!(Request(Forbidden("Room is not accessible")))
},
}
}

View File

@@ -35,7 +35,7 @@
///
/// Get the supported login types of this server. One of these should be used as
/// the `type` field when logging in.
#[tracing::instrument(skip_all, fields(%client), name = "login")]
#[tracing::instrument(skip_all, fields(%client), name = "login", level = "info")]
pub(crate) async fn get_login_types_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -53,7 +53,7 @@ pub(crate) async fn get_login_types_route(
/// Authenticates the given user by its ID and its password.
///
/// Returns the user ID if successful, and an error otherwise.
#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
#[tracing::instrument(skip_all, fields(%user_id), name = "password", level = "debug")]
pub(crate) async fn password_login(
services: &Services,
user_id: &UserId,
@@ -96,7 +96,7 @@ pub(crate) async fn password_login(
///
/// Creates the user if the user is found in the LDAP and do not already have an
/// account.
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap", level = "debug")]
pub(super) async fn ldap_login(
services: &Services,
user_id: &UserId,
@@ -212,7 +212,7 @@ pub(crate) async fn handle_login(
/// Note: You can use [`GET
/// /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
/// supported login types.
#[tracing::instrument(skip_all, fields(%client), name = "login")]
#[tracing::instrument(skip_all, fields(%client), name = "login", level = "info")]
pub(crate) async fn login_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -345,7 +345,7 @@ pub(crate) async fn login_route(
/// to log in with the m.login.token flow.
///
/// <https://spec.matrix.org/v1.13/client-server-api/#post_matrixclientv1loginget_token>
#[tracing::instrument(skip_all, fields(%client), name = "login_token")]
#[tracing::instrument(skip_all, fields(%client), name = "login_token", level = "info")]
pub(crate) async fn login_token_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -413,7 +413,7 @@ pub(crate) async fn login_token_route(
/// last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[tracing::instrument(skip_all, fields(%client), name = "logout")]
#[tracing::instrument(skip_all, fields(%client), name = "logout", level = "info")]
pub(crate) async fn logout_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,
@@ -440,7 +440,7 @@ pub(crate) async fn logout_route(
/// Note: This is equivalent to calling [`GET
/// /_matrix/client/r0/logout`](fn.logout_route.html) from each device of this
/// user.
#[tracing::instrument(skip_all, fields(%client), name = "logout")]
#[tracing::instrument(skip_all, fields(%client), name = "logout", level = "info")]
pub(crate) async fn logout_all_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -140,8 +140,8 @@ pub(crate) async fn get_state_events_for_key_route(
.await
.map_err(|_| {
err!(Request(NotFound(debug_warn!(
room_id = ?body.room_id,
event_type = ?body.event_type,
room_id = %body.room_id,
event_type = %body.event_type,
"State event not found in room.",
))))
})?;
@@ -226,7 +226,7 @@ async fn allowed_to_send_state_event(
match event_type {
| StateEventType::RoomCreate => {
return Err!(Request(BadJson(debug_warn!(
?room_id,
%room_id,
"You cannot update m.room.create after a room has been created."
))));
},
@@ -237,7 +237,7 @@ async fn allowed_to_send_state_event(
| Ok(acl_content) => {
if acl_content.allow_is_empty() {
return Err!(Request(BadJson(debug_warn!(
?room_id,
%room_id,
"Sending an ACL event with an empty allow key will permanently \
brick the room for non-conduwuit's as this equates to no servers \
being allowed to participate in this room."
@@ -246,7 +246,7 @@ async fn allowed_to_send_state_event(
if acl_content.deny_contains("*") && acl_content.allow_contains("*") {
return Err!(Request(BadJson(debug_warn!(
?room_id,
%room_id,
"Sending an ACL event with a deny and allow key value of \"*\" will \
permanently brick the room for non-conduwuit's as this equates to \
no servers being allowed to participate in this room."
@@ -258,7 +258,7 @@ async fn allowed_to_send_state_event(
&& !acl_content.allow_contains(services.globals.server_name().as_str())
{
return Err!(Request(BadJson(debug_warn!(
?room_id,
%room_id,
"Sending an ACL event with a deny key value of \"*\" and without \
your own server name in the allow key will result in you being \
unable to participate in this room."
@@ -270,7 +270,7 @@ async fn allowed_to_send_state_event(
&& !acl_content.allow_contains(services.globals.server_name().as_str())
{
return Err!(Request(BadJson(debug_warn!(
?room_id,
%room_id,
"Sending an ACL event for an allow key without \"*\" and without \
your own server name in the allow key will result in you being \
unable to participate in this room."

View File

@@ -50,8 +50,8 @@
level = "debug",
skip_all,
fields(
room_id = ?room_id,
syncing_user = ?sync_context.syncing_user,
room_id = %room_id,
syncing_user = %sync_context.syncing_user,
),
)]
pub(super) async fn load_joined_room(
@@ -578,7 +578,7 @@ async fn build_notification_counts(
)
.await;
trace!(?notification_count, ?highlight_count, "syncing new notification counts");
trace!(%notification_count, %highlight_count, "syncing new notification counts");
Ok(Some(UnreadNotificationsCount {
notification_count: Some(notification_count),
@@ -692,8 +692,8 @@ async fn build_room_summary(
};
trace!(
?joined_member_count,
?invited_member_count,
%joined_member_count,
%invited_member_count,
heroes_length = heroes.as_ref().map(HashSet::len),
"syncing updated summary"
);

View File

@@ -307,8 +307,8 @@ async fn build_left_state_and_timeline(
}
trace!(
?timeline_start_count,
?timeline_end_count,
%timeline_start_count,
%timeline_end_count,
"syncing {} timeline events (limited = {}) and {} state events",
timeline.pdus.len(),
timeline.limited,

View File

@@ -275,7 +275,7 @@ pub(crate) async fn build_sync_events(
match joined_room {
| Ok((room, updates)) => Some((room_id, room, updates)),
| Err(err) => {
warn!(?err, ?room_id, "error loading joined room {}", room_id);
warn!(?err, %room_id, "error loading joined room");
None
},
}

View File

@@ -217,7 +217,7 @@ pub(super) async fn build_state_incremental<'a>(
the performance penalty is acceptable.
*/
trace!(?timeline_is_linear, ?timeline.limited, "computing state for incremental sync");
trace!(%timeline_is_linear, %timeline.limited, "computing state for incremental sync");
// fetch the shorteventids of state events in the timeline
let state_events_in_timeline: BTreeSet<ShortEventId> = services

View File

@@ -26,7 +26,7 @@
/// TODO: Implement pagination, currently this just returns everything
///
/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
#[tracing::instrument(skip_all, fields(%client), name = "mutual_rooms")]
#[tracing::instrument(skip_all, fields(%client), name = "mutual_rooms", level = "info")]
pub(crate) async fn get_mutual_rooms_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -1,6 +1,5 @@
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{Error, Result};
use futures::StreamExt;
use ruma::api::client::{
discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
@@ -71,21 +70,18 @@ pub(crate) async fn well_known_support(
// Try to add admin users as contacts if no contacts are configured
if contacts.is_empty() {
if let Ok(admin_room) = services.admin.get_admin_room().await {
let admin_users = services.rooms.state_cache.room_members(&admin_room);
let mut stream = admin_users;
let admin_users = services.admin.get_admins().await;
while let Some(user_id) = stream.next().await {
// Skip server user
if *user_id == services.globals.server_user {
continue;
}
contacts.push(Contact {
role: role_value.clone(),
email_address: None,
matrix_id: Some(user_id.to_owned()),
});
for user_id in &admin_users {
if *user_id == services.globals.server_user {
continue;
}
contacts.push(Contact {
role: role_value.clone(),
email_address: None,
matrix_id: Some(user_id.to_owned()),
});
}
}

View File

@@ -40,7 +40,7 @@ pub(crate) async fn get_missing_events_route(
while i < queued_events.len() && events.len() < limit {
let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else {
debug!(
?body.origin,
body.origin = body.origin.as_ref().map(tracing::field::display),
"Event {} does not exist locally, skipping", &queued_events[i]
);
i = i.saturating_add(1);
@@ -59,7 +59,7 @@ pub(crate) async fn get_missing_events_route(
.await
{
debug!(
?body.origin,
body.origin = body.origin.as_ref().map(tracing::field::display),
"Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id
);
i = i.saturating_add(1);
@@ -68,7 +68,7 @@ pub(crate) async fn get_missing_events_route(
let Ok(event) = to_canonical_object(&pdu) else {
debug_error!(
?body.origin,
body.origin = body.origin.as_ref().map(tracing::field::display),
"Failed to convert PDU in database to canonical JSON: {pdu:?}"
);
i = i.saturating_add(1);

View File

@@ -19,7 +19,7 @@
/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`
///
/// Invites a remote user to a room.
#[tracing::instrument(skip_all, fields(%client), name = "invite")]
#[tracing::instrument(skip_all, fields(%client), name = "invite", level = "info")]
pub(crate) async fn create_invite_route(
State(services): State<crate::State>,
InsecureClientIp(client): InsecureClientIp,

View File

@@ -22,7 +22,7 @@
/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}`
///
/// Creates a join template.
#[tracing::instrument(skip_all, fields(room_id = %body.room_id, user_id = %body.user_id, origin = %body.origin()))]
#[tracing::instrument(skip_all, fields(room_id = %body.room_id, user_id = %body.user_id, origin = %body.origin()), level = "info")]
pub(crate) async fn create_join_event_template_route(
State(services): State<crate::State>,
body: Ruma<prepare_join_event::v1::Request>,

View File

@@ -3,9 +3,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug,
debug::INFO_SPAN_LEVEL,
debug_warn, err, error,
Err, Error, Result, debug, debug_warn, err, error,
result::LogErr,
trace,
utils::{
@@ -48,7 +46,7 @@
/// Push EDUs and PDUs to this server.
#[tracing::instrument(
name = "txn",
level = INFO_SPAN_LEVEL,
level = "debug",
skip_all,
fields(
%client,
@@ -83,8 +81,8 @@ pub(crate) async fn send_transaction_message_route(
pdus = body.pdus.len(),
edus = body.edus.len(),
elapsed = ?txn_start_time.elapsed(),
id = ?body.transaction_id,
origin =?body.origin(),
id = %body.transaction_id,
origin = %body.origin(),
"Starting txn",
);
@@ -110,8 +108,8 @@ pub(crate) async fn send_transaction_message_route(
pdus = body.pdus.len(),
edus = body.edus.len(),
elapsed = ?txn_start_time.elapsed(),
id = ?body.transaction_id,
origin =?body.origin(),
id = %body.transaction_id,
origin = %body.origin(),
"Finished txn",
);
for (id, result) in &results {

View File

@@ -26,7 +26,7 @@
use crate::Ruma;
/// helper method for /send_join v1 and v2
#[tracing::instrument(skip(services, pdu, omit_members), fields(room_id = room_id.as_str(), origin = origin.as_str()))]
#[tracing::instrument(skip(services, pdu, omit_members), fields(room_id = room_id.as_str(), origin = origin.as_str()), level = "info")]
async fn create_join_event(
services: &Services,
origin: &ServerName,

View File

@@ -1,3 +1,5 @@
use std::time::Duration;
use axum::extract::State;
use conduwuit::{Error, Result};
use futures::{FutureExt, StreamExt, TryFutureExt};
@@ -96,6 +98,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| Some(u.server_name()) == body.origin.as_deref(),
services.globals.allow_device_name_federation(),
Duration::from_secs(0),
)
.await?;
@@ -124,7 +127,8 @@ pub(crate) async fn claim_keys_route(
));
}
let result = claim_keys_helper(&services, &body.one_time_keys).await?;
let result =
claim_keys_helper(&services, &body.one_time_keys, Duration::from_secs(0)).await?;
Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys })
}

View File

@@ -340,7 +340,7 @@ fn set<T>(key: &Key, val: T) -> Result<T>
#[tracing::instrument(
name = "get",
level = "trace"
level = "trace",
skip_all,
fields(?key)
)]
@@ -357,7 +357,7 @@ fn get<T>(key: &Key) -> Result<T>
#[tracing::instrument(
name = "xchg",
level = "trace"
level = "trace",
skip_all,
fields(?key, ?val)
)]

View File

@@ -59,7 +59,7 @@ fn deref(&self) -> &Self::Target { HANDLE.with_borrow_mut(|handle| self.load(han
/// Update the active configuration, returning prior configuration.
#[implement(Manager)]
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub fn update(&self, config: Config) -> Result<Arc<Config>> {
let config = Arc::new(config);
let new = Arc::into_raw(config);

View File

@@ -1819,6 +1819,22 @@ pub struct Config {
#[serde(default = "default_admin_room_tag")]
pub admin_room_tag: String,
/// A list of Matrix IDs that are qualified as server admins.
///
/// Any Matrix IDs within this list are regarded as an admin
/// regardless of whether they are in the admin room or not
///
/// default: []
#[serde(default)]
pub admins_list: Vec<OwnedUserId>,
/// Defines whether those within the admin room are added to the
/// admins_list.
///
/// default: true
#[serde(default = "true_fn")]
pub admins_from_room: bool,
/// Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
/// This is NOT enabled by default.
#[serde(default)]

View File

@@ -532,8 +532,8 @@ pub async fn auth_check<E, F, Fut>(
if sender_power_level < invite_level {
warn!(
%sender,
has=?sender_power_level,
required=?invite_level,
has=%sender_power_level,
required=%invite_level,
"sender cannot send invites in this room"
);
return Ok(false);
@@ -558,12 +558,19 @@ pub async fn auth_check<E, F, Fut>(
// If type is m.room.power_levels
if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels {
debug!("starting m.room.power_levels check");
let mut creators = BTreeSet::new();
if room_version.explicitly_privilege_room_creators {
creators.insert(create_event.sender().to_owned());
for creator in room_create_content.additional_creators.iter().flatten() {
creators.insert(creator.deserialize()?);
}
}
match check_power_levels(
room_version,
incoming_event,
power_levels_event.as_ref(),
sender_power_level,
&creators,
) {
| Some(required_pwr_lvl) =>
if !required_pwr_lvl {
@@ -598,8 +605,8 @@ pub async fn auth_check<E, F, Fut>(
if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? {
warn!(
%sender,
?sender_power_level,
?redact_level,
%sender_power_level,
%redact_level,
"redaction event was not allowed"
);
return Ok(false);
@@ -765,11 +772,12 @@ struct GetThirdPartyInvite {
power_levels_event.as_ref().is_some(),
) || auth_user_pl >= invite_level;
trace!(
auth_user_pl=?auth_user_pl,
invite_level=?invite_level,
user_joined=?user_joined,
okay_power=?okay_power,
passing=?(user_joined && okay_power),
%auth_user_pl,
%auth_user_pl,
%invite_level,
%user_joined,
%okay_power,
passing=%(user_joined && okay_power),
"user for join auth is valid check details"
);
user_joined && okay_power
@@ -1204,7 +1212,7 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
{
warn!(
%user_level,
required=?event_type_power_level,
required=%event_type_power_level,
state_key=?event.state_key(),
sender=%event.sender(),
"state_key starts with @ but does not match sender",
@@ -1221,8 +1229,8 @@ fn check_power_levels(
power_event: &impl Event,
previous_power_event: Option<&impl Event>,
user_level: Int,
creators: &BTreeSet<OwnedUserId>,
) -> Option<bool> {
// TODO(hydra): This function does not care about creators!
match power_event.state_key() {
| Some("") => {},
| Some(key) => {
@@ -1287,6 +1295,10 @@ fn check_power_levels(
for user in user_levels_to_check {
let old_level = old_state.users.get(user);
let new_level = new_state.users.get(user);
if new_level.is_some() && creators.contains(user) {
warn!("creators cannot appear in the users list of m.room.power_levels");
return Some(false); // cannot alter creator power level
}
if old_level.is_some() && new_level.is_some() && old_level == new_level {
continue;
}

View File

@@ -7,7 +7,7 @@
use crate::util::map_err;
#[implement(Engine)]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "info")]
pub fn backup(&self) -> Result {
let mut engine = self.backup_engine()?;
let config = &self.ctx.server.config;

View File

@@ -4,7 +4,7 @@
#[tracing::instrument(
parent = None,
name = "rocksdb",
level = "trace"
level = "trace",
skip(msg),
)]
pub(crate) fn handle(level: LogLevel, msg: &str) {

View File

@@ -17,7 +17,7 @@
use crate::{Context, or_else};
#[implement(Engine)]
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<Self>> {
let server = &ctx.server;
let config = &server.config;
@@ -63,7 +63,7 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
}
#[implement(Engine)]
#[tracing::instrument(name = "configure", skip_all)]
#[tracing::instrument(name = "configure", skip_all, level = "debug")]
fn configure_cfds(
ctx: &Arc<Context>,
db_opts: &Options,
@@ -119,7 +119,7 @@ fn configure_cfds(
}
#[implement(Engine)]
#[tracing::instrument(name = "discover", skip_all)]
#[tracing::instrument(name = "discover", skip_all, level = "debug")]
fn discover_cfs(path: &Path, opts: &Options) -> BTreeSet<String> {
Db::list_cf(opts, path)
.unwrap_or_default()

View File

@@ -26,7 +26,7 @@ pub struct Options {
#[implement(super::Map)]
#[tracing::instrument(
name = "compact",
level = "info"
level = "info",
skip(self),
fields(%self),
)]

View File

@@ -113,7 +113,7 @@ fn drop(&mut self) {
}
#[implement(Pool)]
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "debug")]
pub(crate) fn close(&self) {
let workers = take(&mut *self.workers.lock());
@@ -147,8 +147,8 @@ pub(crate) fn close(&self) {
.map(|result| result.map_err(Error::from_panic))
.enumerate()
.for_each(|(id, result)| match result {
| Ok(()) => trace!(?id, "worker joined"),
| Err(error) => error!(?id, "worker joined with error: {error}"),
| Ok(()) => trace!(%id, "worker joined"),
| Err(error) => error!(%id, "worker joined with error: {error}"),
});
}
@@ -297,7 +297,7 @@ fn worker_init(&self, id: usize) {
}
debug!(
?group,
%group,
affinity = ?affinity.collect::<Vec<_>>(),
"worker ready"
);

View File

@@ -105,8 +105,8 @@ pub(super) fn configure(server: &Arc<Server>) -> (usize, Vec<usize>, Vec<usize>)
.unwrap_or("None"),
?worker_counts,
?queue_sizes,
?total_workers,
stream_width = ?stream::automatic_width(),
%total_workers,
stream_width = %stream::automatic_width(),
"Frontend topology",
);
@@ -139,13 +139,13 @@ fn update_stream_width(server: &Arc<Server>, num_queues: usize, total_workers: u
let (old_width, new_width) = stream::set_width(req_width);
let (old_amp, new_amp) = stream::set_amplification(req_amp);
debug!(
scale = ?config.stream_width_scale,
?num_queues,
?req_width,
?old_width,
?new_width,
?old_amp,
?new_amp,
scale = %config.stream_width_scale,
%num_queues,
%req_width,
%old_width,
%new_width,
%old_amp,
%new_amp,
"Updated global stream width"
);
}

View File

@@ -64,15 +64,15 @@ standard = [
"url_preview",
"zstd_compression",
"sentry_telemetry",
"otlp_telemetry"
"otlp_telemetry",
"console",
]
full = [
"standard",
# "hardened_malloc", # Conflicts with jemalloc
"jemalloc_prof",
"perf_measurements",
"tokio_console"
# sentry_telemetry
"tokio_console",
]
blurhashing = [

View File

@@ -6,6 +6,7 @@
debug_warn, err,
log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
result::UnwrapOrErr,
warn,
};
#[cfg(feature = "otlp_telemetry")]
use opentelemetry::trace::TracerProvider;
@@ -85,7 +86,7 @@ pub(crate) fn init(
let exporter = match config.otlp_protocol.as_str() {
| "grpc" => opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_protocol(opentelemetry_otlp::Protocol::Grpc)
.with_protocol(opentelemetry_otlp::Protocol::Grpc) // TODO: build from env when 0.32 is released
.build()
.expect("Failed to create OTLP gRPC exporter"),
| "http" => opentelemetry_otlp::SpanExporter::builder()
@@ -93,7 +94,7 @@ pub(crate) fn init(
.build()
.expect("Failed to create OTLP HTTP exporter"),
| protocol => {
debug_warn!(
warn!(
"Invalid OTLP protocol '{}', falling back to HTTP. Valid options are \
'http' or 'grpc'.",
protocol

View File

@@ -50,7 +50,8 @@ pub fn run_with_args(args: &Args) -> Result<()> {
#[tracing::instrument(
name = "main",
parent = None,
skip_all
skip_all,
level = "info"
)]
async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
extern crate conduwuit_router as router;

View File

@@ -6,7 +6,7 @@
use super::server::Server;
#[cfg(unix)]
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(super) async fn signal(server: Arc<Server>) {
use signal::unix;
use unix::SignalKind;
@@ -39,13 +39,13 @@ pub(super) async fn signal(server: Arc<Server>) {
};
if let Err(e) = result {
debug_error!(?sig, "signal: {e}");
debug_error!(%sig, "signal: {e}");
}
}
}
#[cfg(not(unix))]
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(super) async fn signal(server: Arc<Server>) {
loop {
tokio::select! {

View File

@@ -66,7 +66,10 @@ pub(crate) fn build(services: &Arc<Services>) -> Result<(Router, Guard)> {
.layer(RequestBodyTimeoutLayer::new(Duration::from_secs(
server.config.client_receive_timeout,
)))
.layer(TimeoutLayer::with_status_code(StatusCode::REQUEST_TIMEOUT, Duration::from_secs(server.config.client_request_timeout)))
.layer(TimeoutLayer::with_status_code(
StatusCode::REQUEST_TIMEOUT,
Duration::from_secs(server.config.client_request_timeout),
))
.layer(SetResponseHeaderLayer::if_not_present(
HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster
HeaderValue::from_static("?1"),

View File

@@ -102,13 +102,13 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result<Respons
let reason = status.canonical_reason().unwrap_or("Unknown Reason");
if status.is_server_error() {
error!(method = ?method, uri = ?uri, "{code} {reason}");
error!(%method, %uri, "{code} {reason}");
} else if status.is_client_error() {
debug_error!(method = ?method, uri = ?uri, "{code} {reason}");
debug_error!(%method, %uri, "{code} {reason}");
} else if status.is_redirection() {
debug!(method = ?method, uri = ?uri, "{code} {reason}");
debug!(%method, %uri, "{code} {reason}");
} else {
trace!(method = ?method, uri = ?uri, "{code} {reason}");
trace!(%method, %uri, "{code} {reason}");
}
if status == StatusCode::METHOD_NOT_ALLOWED {

View File

@@ -19,7 +19,7 @@
use crate::serve;
/// Main loop base
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(crate) async fn run(services: Arc<Services>) -> Result<()> {
let server = &services.server;
debug!("Start");
@@ -58,7 +58,7 @@ pub(crate) async fn run(services: Arc<Services>) -> Result<()> {
}
/// Async initializations
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(crate) async fn start(server: Arc<Server>) -> Result<Arc<Services>> {
debug!("Starting...");
@@ -73,7 +73,7 @@ pub(crate) async fn start(server: Arc<Server>) -> Result<Arc<Services>> {
}
/// Async destructions
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
pub(crate) async fn stop(services: Arc<Services>) -> Result<()> {
debug!("Shutting down...");
@@ -108,7 +108,7 @@ pub(crate) async fn stop(services: Arc<Services>) -> Result<()> {
Ok(())
}
#[tracing::instrument(skip_all)]
#[tracing::instrument(skip_all, level = "info")]
async fn signal(server: Arc<Server>, tx: Sender<()>, handle: axum_server::Handle) {
server
.clone()
@@ -126,7 +126,7 @@ async fn handle_shutdown(server: Arc<Server>, tx: Sender<()>, handle: axum_serve
let timeout = Duration::from_secs(timeout);
debug!(
?timeout,
handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed),
handle_active = %server.metrics.requests_handle_active.load(Ordering::Relaxed),
"Notifying for graceful shutdown"
);

View File

@@ -9,7 +9,10 @@
use termimad::MadSkin;
use tokio::task::JoinHandle;
use crate::{Dep, admin};
use crate::{
Dep,
admin::{self, InvocationSource},
};
pub struct Console {
server: Arc<Server>,
@@ -160,7 +163,11 @@ async fn handle(self: Arc<Self>, line: String) {
}
async fn process(self: Arc<Self>, line: String) {
match self.admin.command_in_place(line, None).await {
match self
.admin
.command_in_place(line, None, InvocationSource::Console)
.await
{
| Ok(Some(ref content)) => self.output(content),
| Err(ref content) => self.output_err(content),
| _ => unreachable!(),

View File

@@ -2,6 +2,8 @@
use ruma::events::room::message::RoomMessageEventContent;
use tokio::time::{Duration, sleep};
use crate::admin::InvocationSource;
pub(super) const SIGNAL: &str = "SIGUSR2";
/// Possibly spawn the terminal console at startup if configured.
@@ -88,7 +90,10 @@ pub(super) async fn signal_execute(&self) -> Result {
async fn execute_command(&self, i: usize, command: String) -> Result {
debug!("Execute command #{i}: executing {command:?}");
match self.command_in_place(command, None).await {
match self
.command_in_place(command, None, InvocationSource::Console)
.await
{
| Ok(Some(output)) => Self::execute_command_output(i, &output),
| Err(output) => Self::execute_command_error(i, &output),
| Ok(None) => {

View File

@@ -1,6 +1,8 @@
use std::collections::BTreeMap;
use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder};
use conduwuit::{
Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder, warn,
};
use ruma::{
RoomId, UserId,
events::{
@@ -120,7 +122,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
let room_tag = self.services.server.config.admin_room_tag.as_str();
if !room_tag.is_empty() {
if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await {
error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}");
error!(%room_id, %user_id, %room_tag, "Failed to set tag for admin grant: {e}");
}
}
@@ -176,6 +178,19 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R
pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
use MembershipState::{Invite, Join, Knock, Leave};
if self
.services
.server
.config
.admins_list
.contains(&user_id.to_owned())
{
warn!(
"Revoking the admin status of {user_id} will not work correctly as they are within \
the admins_list config."
);
}
let Ok(room_id) = self.get_admin_room().await else {
return Err!(error!("No admin room available or created."));
};

View File

@@ -14,10 +14,10 @@
Error, Event, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder,
};
pub use create::create_admin_room;
use futures::{Future, FutureExt, TryFutureExt};
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use loole::{Receiver, Sender};
use ruma::{
Mxc, OwnedEventId, OwnedMxcUri, OwnedRoomId, RoomId, UInt, UserId,
Mxc, OwnedEventId, OwnedMxcUri, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId,
events::{
Mentions,
room::{
@@ -54,15 +54,37 @@ struct Services {
media: Dep<crate::media::Service>,
}
/// Inputs to a command are a multi-line string, optional reply_id, and optional
/// sender.
/// Inputs to a command are a multi-line string, invocation source, optional
/// reply_id, and optional sender.
#[derive(Debug)]
pub struct CommandInput {
	/// The raw (possibly multi-line) command text to execute.
	pub command: String,
	/// Event ID of the message being replied to, if this command came
	/// from a room message; used to thread the response.
	pub reply_id: Option<OwnedEventId>,
	/// Where this command was invoked from (admin room, escaped command,
	/// console, or internal) — see `InvocationSource`.
	pub source: InvocationSource,
	/// The user who issued the command; `None` when no Matrix user
	/// initiated it (e.g. console or internal invocations).
	pub sender: Option<Box<UserId>>,
}
/// Identifies the origin of an admin command invocation.
#[derive(Debug, Clone, Copy)]
pub enum InvocationSource {
	/// The server's private admin room
	AdminRoom,
	/// An escaped `\!admin` command in a public room
	EscapedCommand,
	/// The server's admin console
	Console,
	/// Some other trusted internal source
	Internal,
}

impl InvocationSource {
	/// Whether "restricted" commands may run when invoked from this
	/// source.
	///
	/// Restricted commands are ones that could be dangerous if executed
	/// by an attacker or echoed in a public room; every source except an
	/// escaped command in a public room is permitted to use them.
	#[must_use]
	pub fn allows_restricted(&self) -> bool {
		match self {
			| Self::EscapedCommand => false,
			| Self::AdminRoom | Self::Console | Self::Internal => true,
		}
	}
}
/// Prototype of the tab-completer. The input is buffered text when tab
/// asserted; the output will fully replace the input buffer.
pub type Completer = fn(&str) -> String;
@@ -276,10 +298,15 @@ pub async fn text_to_file(&self, body: &str) -> Result<OwnedMxcUri> {
/// Posts a command to the command processor queue and returns. Processing
/// will take place on the service worker's task asynchronously. Errors if
/// the queue is full.
pub fn command(&self, command: String, reply_id: Option<OwnedEventId>) -> Result<()> {
pub fn command(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
) -> Result<()> {
self.channel
.0
.send(CommandInput { command, reply_id, sender: None })
.send(CommandInput { command, reply_id, source, sender: None })
.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
}
@@ -290,11 +317,17 @@ pub fn command_with_sender(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
sender: Box<UserId>,
) -> Result<()> {
self.channel
.0
.send(CommandInput { command, reply_id, sender: Some(sender) })
.send(CommandInput {
command,
reply_id,
source,
sender: Some(sender),
})
.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
}
@@ -304,8 +337,9 @@ pub async fn command_in_place(
&self,
command: String,
reply_id: Option<OwnedEventId>,
source: InvocationSource,
) -> ProcessorResult {
self.process_command(CommandInput { command, reply_id, sender: None })
self.process_command(CommandInput { command, reply_id, source, sender: None })
.await
}
@@ -349,16 +383,50 @@ async fn process_command(&self, command: CommandInput) -> ProcessorResult {
handle(services, command).await
}
/// Collects every admin user of this server.
///
/// The result always contains the configured `admins_list`; when
/// `admins_from_room` is enabled and an admin room exists, the room's
/// current members are appended as well. No deduplication is performed,
/// so a user may appear twice if present in both places.
pub async fn get_admins(&self) -> Vec<OwnedUserId> {
	let config = &self.services.server.config;

	// Start from the statically configured admin list.
	let mut admins: Vec<OwnedUserId> = config.admins_list.clone();

	// Optionally extend with the admin room's membership.
	if config.admins_from_room {
		if let Ok(admin_room) = self.get_admin_room().await {
			let mut members = self.services.state_cache.room_members(&admin_room);
			while let Some(user_id) = members.next().await {
				admins.push(user_id.to_owned());
			}
		}
	}

	admins
}
/// Checks whether the given user is an admin of this server.
pub async fn user_is_admin(&self, user_id: &UserId) -> bool {
let Ok(admin_room) = self.get_admin_room().await else {
return false;
};
if self
.services
.server
.config
.admins_list
.contains(&user_id.to_owned())
{
return true;
}
self.services
.state_cache
.is_joined(user_id, &admin_room)
.await
if self.services.server.config.admins_from_room {
if let Ok(admin_room) = self.get_admin_room().await {
return self
.services
.state_cache
.is_joined(user_id, &admin_room)
.await;
}
}
false
}
/// Gets the room ID of the admin room
@@ -459,59 +527,59 @@ async fn handle_response_error(
Ok(())
}
pub async fn is_admin_command<E>(&self, event: &E, body: &str) -> bool
pub async fn is_admin_command<E>(&self, event: &E, body: &str) -> Option<InvocationSource>
where
E: Event + Send + Sync,
{
// Server-side command-escape with public echo
let is_escape = body.starts_with('\\');
let is_public_escape = is_escape && body.trim_start_matches('\\').starts_with("!admin");
// Admin command with public echo (in admin room)
let server_user = &self.services.globals.server_user;
let is_public_prefix =
body.starts_with("!admin") || body.starts_with(server_user.as_str());
// Expected backward branch
if !is_public_escape && !is_public_prefix {
return false;
}
let user_is_local = self.services.globals.user_is_local(event.sender());
// only allow public escaped commands by local admins
if is_public_escape && !user_is_local {
return false;
}
// Check if server-side command-escape is disabled by configuration
if is_public_escape && !self.services.server.config.admin_escape_commands {
return false;
}
// Prevent unescaped !admin from being used outside of the admin room
if event.room_id().is_some()
&& is_public_prefix
&& !self.is_admin_room(event.room_id().unwrap()).await
{
return false;
}
// Only senders who are admin can proceed
// If the user isn't an admin they definitely can't run admin commands
if !self.user_is_admin(event.sender()).await {
return false;
return None;
}
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as the server user
let emergency_password_set = self.services.server.config.emergency_password.is_some();
let from_server = event.sender() == server_user && !emergency_password_set;
if from_server && self.is_admin_room(event.room_id().unwrap()).await {
return false;
}
if let Some(room_id) = event.room_id()
&& self.is_admin_room(room_id).await
{
// This is a message in the admin room
// Authentic admin command
true
// Ignore messages which aren't admin commands
let server_user = &self.services.globals.server_user;
if !(body.starts_with("!admin") || body.starts_with(server_user.as_str())) {
return None;
}
// Ignore messages from the server user _unless_ the emergency password is set
let emergency_password_set = self.services.server.config.emergency_password.is_some();
if event.sender() == server_user && !emergency_password_set {
return None;
}
// Looks good
Some(InvocationSource::AdminRoom)
} else {
// This is a message outside the admin room
// Is it an escaped admin command? i.e. `\!admin --help`
let is_public_escape =
body.starts_with('\\') && body.trim_start_matches('\\').starts_with("!admin");
// Ignore the message if it's not
if !is_public_escape {
return None;
}
// Only admin users belonging to this server can use escaped commands
if !self.services.globals.user_is_local(event.sender()) {
return None;
}
// Check if escaped commands are disabled in the config
if !self.services.server.config.admin_escape_commands {
return None;
}
// Looks good
Some(InvocationSource::EscapedCommand)
}
}
#[must_use]

View File

@@ -96,7 +96,7 @@ async fn worker(self: Arc<Self>) -> Result<()> {
}
if let Err(e) = self.check().await {
warn!(%e, "Failed to check for announcements");
warn!(?e, "Failed to check for announcements");
}
}

View File

@@ -88,7 +88,7 @@ async fn perform<T>(
let url = request.url().clone();
let method = request.method().clone();
debug!(?method, ?url, "Sending request");
debug!(%method, %url, "Sending request");
match client.execute(request).await {
| Ok(response) => handle_response::<T>(dest, actual, &method, &url, response).await,
| Err(error) =>
@@ -144,9 +144,9 @@ async fn into_http_response(
) -> Result<http::Response<Bytes>> {
let status = response.status();
trace!(
?status, ?method,
request_url = ?url,
response_url = ?response.url(),
%status, %method,
request_url = %url,
response_url = %response.url(),
"Received response from {}",
actual.string(),
);
@@ -196,9 +196,9 @@ fn handle_error(
debug_warn!("{e:?}");
} else if e.is_redirect() {
debug_error!(
method = ?method,
url = ?url,
final_url = ?e.url(),
%method,
%url,
final_url = e.url().map(tracing::field::display),
"Redirect loop {}: {}",
actual.host,
e,

View File

@@ -124,16 +124,16 @@ pub(super) async fn search_file_metadata(
.next()
.map(string_from_bytes)
.transpose()
.map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: {e}"))))?;
.map_err(|e| err!(Database(error!(%mxc, "Content-type is invalid: {e}"))))?;
let content_disposition = parts
.next()
.map(Some)
.ok_or_else(|| err!(Database(error!(?mxc, "Media ID in db is invalid."))))?
.ok_or_else(|| err!(Database(error!(%mxc, "Media ID in db is invalid."))))?
.filter(|bytes| !bytes.is_empty())
.map(string_from_bytes)
.transpose()
.map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: {e}"))))?
.map_err(|e| err!(Database(error!(%mxc, "Content-type is invalid: {e}"))))?
.as_deref()
.map(str::parse)
.transpose()?;

View File

@@ -118,14 +118,14 @@ pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> {
match self.db.search_mxc_metadata_prefix(mxc).await {
| Ok(keys) => {
for key in keys {
trace!(?mxc, "MXC Key: {key:?}");
debug_info!(?mxc, "Deleting from filesystem");
trace!(%mxc, "MXC Key: {key:?}");
debug_info!(%mxc, "Deleting from filesystem");
if let Err(e) = self.remove_media_file(&key).await {
debug_error!(?mxc, "Failed to remove media file: {e}");
debug_error!(%mxc, "Failed to remove media file: {e}");
}
debug_info!(?mxc, "Deleting from database");
debug_info!(%mxc, "Deleting from database");
self.db.delete_file_mxc(mxc).await;
}
@@ -148,7 +148,7 @@ pub async fn delete_from_user(&self, user: &UserId) -> Result<usize> {
for mxc in mxcs {
let Ok(mxc) = mxc.as_str().try_into().inspect_err(|e| {
debug_error!(?mxc, "Failed to parse MXC URI from database: {e}");
debug_error!(%mxc, "Failed to parse MXC URI from database: {e}");
}) else {
continue;
};
@@ -210,7 +210,7 @@ pub async fn get_all_mxcs(&self) -> Result<Vec<OwnedMxcUri>> {
let Some(mxc_s) = mxc else {
debug_warn!(
?mxc,
mxc,
"Parsed MXC URL unicode bytes from database but is still invalid"
);
continue;
@@ -256,7 +256,7 @@ pub async fn delete_all_media_within_timeframe(
let Some(mxc_s) = mxc else {
debug_warn!(
?mxc,
mxc,
"Parsed MXC URL unicode bytes from database but is still invalid"
);
continue;

View File

@@ -71,10 +71,10 @@ async fn request_url_preview(&self, url: &Url) -> Result<UrlPreviewData> {
let client = &self.services.client.url_preview;
let response = client.head(url.as_str()).send().await?;
debug!(?url, "URL preview response headers: {:?}", response.headers());
debug!(%url, "URL preview response headers: {:?}", response.headers());
if let Some(remote_addr) = response.remote_addr() {
debug!(?url, "URL preview response remote address: {:?}", remote_addr);
debug!(%url, "URL preview response remote address: {:?}", remote_addr);
if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) {
if !self.services.client.valid_cidr_range(&ip) {

View File

@@ -247,7 +247,7 @@ async fn handle_location(
) -> Result<FileMeta> {
self.location_request(location).await.map_err(|error| {
err!(Request(NotFound(
debug_warn!(%mxc, ?user, ?location, ?error, "Fetching media from location failed")
debug_warn!(%mxc, user = user.map(tracing::field::display), ?location, ?error, "Fetching media from location failed")
)))
})
}
@@ -320,7 +320,7 @@ fn handle_federation_error(
) -> Error {
let fallback = || {
err!(Request(NotFound(
debug_error!(%mxc, ?user, ?server, ?error, "Remote media not found")
debug_error!(%mxc, user = user.map(tracing::field::display), server = server.map(tracing::field::display), ?error, "Remote media not found")
)))
};

View File

@@ -120,7 +120,7 @@ async fn get_thumbnail_generate(
let mut cursor = std::io::Cursor::new(&mut thumbnail_bytes);
thumbnail
.write_to(&mut cursor, image::ImageFormat::Png)
.map_err(|error| err!(error!(?error, "Error writing PNG thumbnail.")))?;
.map_err(|error| err!(error!(%error, "Error writing PNG thumbnail.")))?;
// Save thumbnail in database so we don't have to generate it again next time
let thumbnail_key = self.db.create_file_metadata(

View File

@@ -677,7 +677,7 @@ async fn populate_userroomid_leftstate_table(services: &Services) -> Result {
shortstatehash_cache.insert(room_id.to_owned(), shortstatehash);
shortstatehash
} else {
warn!(?room_id, ?user_id, "room has no shortstatehash");
warn!(%room_id, %user_id, "room has no shortstatehash");
return Ok((total, fixed, shortstatehash_cache));
};
@@ -698,8 +698,8 @@ async fn populate_userroomid_leftstate_table(services: &Services) -> Result {
},
| Err(_) => {
warn!(
?room_id,
?user_id,
%room_id,
%user_id,
"room cached as left has no leave event for user, removing \
cache entry"
);

View File

@@ -198,11 +198,11 @@ pub async fn unset_all_presence(&self) {
presence.presence,
PresenceState::Unavailable | PresenceState::Online | PresenceState::Busy
) {
trace!(?user_id, ?presence, "Skipping user");
trace!(%user_id, ?presence, "Skipping user");
continue;
}
trace!(?user_id, ?presence, "Resetting presence to offline");
trace!(%user_id, ?presence, "Resetting presence to offline");
_ = self
.set_presence(

View File

@@ -427,20 +427,20 @@ async fn send_notice<E>(
}
let d = vec![device];
let mut notifi = Notification::new(d);
let mut notify = Notification::new(d);
notifi.event_id = Some(event.event_id().to_owned());
notifi.room_id = Some(event.room_id().unwrap().to_owned());
notify.event_id = Some(event.event_id().to_owned());
notify.room_id = Some(event.room_id().unwrap().to_owned());
if http
.data
.get("org.matrix.msc4076.disable_badge_count")
.is_none() && http.data.get("disable_badge_count").is_none()
{
notifi.counts = NotificationCounts::new(unread, uint!(0));
notify.counts = NotificationCounts::new(unread, uint!(0));
} else {
// counts will not be serialised if it's the default (0, 0)
// skip_serializing_if = "NotificationCounts::is_default"
notifi.counts = NotificationCounts::default();
notify.counts = NotificationCounts::default();
}
if !event_id_only {
@@ -449,30 +449,30 @@ async fn send_notice<E>(
.iter()
.any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)))
{
notifi.prio = NotificationPriority::High;
notify.prio = NotificationPriority::High;
} else {
notifi.prio = NotificationPriority::Low;
notify.prio = NotificationPriority::Low;
}
notifi.sender = Some(event.sender().to_owned());
notifi.event_type = Some(event.kind().to_owned());
notifi.content = serde_json::value::to_raw_value(event.content()).ok();
notify.sender = Some(event.sender().to_owned());
notify.event_type = Some(event.kind().to_owned());
notify.content = serde_json::value::to_raw_value(event.content()).ok();
if *event.kind() == TimelineEventType::RoomMember {
notifi.user_is_target =
notify.user_is_target =
event.state_key() == Some(event.sender().as_str());
}
notifi.sender_display_name =
notify.sender_display_name =
self.services.users.displayname(event.sender()).await.ok();
notifi.room_name = self
notify.room_name = self
.services
.state_accessor
.get_name(event.room_id().unwrap())
.await
.ok();
notifi.room_alias = self
notify.room_alias = self
.services
.state_accessor
.get_canonical_alias(event.room_id().unwrap())
@@ -480,7 +480,7 @@ async fn send_notice<E>(
.ok();
}
self.send_request(&http.url, send_event_notification::v1::Request::new(notifi))
self.send_request(&http.url, send_event_notification::v1::Request::new(notify))
.await?;
Ok(())

View File

@@ -151,7 +151,7 @@ async fn get_auth_chain_outer(
let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?;
self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice());
debug!(
?event_id,
%event_id,
elapsed = ?started.elapsed(),
"Cache missed event"
);
@@ -188,18 +188,18 @@ async fn get_auth_chain_inner(
let mut found = HashSet::new();
while let Some(event_id) = todo.pop_front() {
trace!(?event_id, "processing auth event");
trace!(%event_id, "processing auth event");
match self.services.timeline.get_pdu(&event_id).await {
| Err(e) => {
debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events");
debug_error!(%event_id, ?e, "Could not find pdu mentioned in auth events");
},
| Ok(pdu) => {
if let Some(claimed_room_id) = pdu.room_id.clone() {
if claimed_room_id != *room_id {
return Err!(Request(Forbidden(error!(
?event_id,
?room_id,
%event_id,
%room_id,
wrong_room_id = ?pdu.room_id.unwrap(),
"auth event for incorrect room"
))));
@@ -214,7 +214,7 @@ async fn get_auth_chain_inner(
.await;
if found.insert(sauthevent) {
trace!(?event_id, ?auth_event, "adding auth event to processing queue");
trace!(%event_id, ?auth_event, "adding auth event to processing queue");
todo.push_back(auth_event.clone());
}

View File

@@ -104,9 +104,9 @@ fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
.is_some_and(|claimed_room_id| claimed_room_id != room_id)
{
return Err!(Request(InvalidParam(error!(
pdu_event_id = ?pdu.event_id(),
pdu_room_id = ?pdu.room_id(),
?room_id,
pdu_event_id = %pdu.event_id(),
pdu_room_id = pdu.room_id().map(tracing::field::display),
%room_id,
"Found event from room in room",
))));
}

View File

@@ -31,7 +31,7 @@
/// contacted for whatever reason, Err(e) is returned, which generally is a
/// fail-open operation.
#[implement(super::Service)]
#[tracing::instrument(skip(self, pdu, pdu_json, room_id))]
#[tracing::instrument(skip(self, pdu, pdu_json, room_id), level = "info")]
pub async fn ask_policy_server(
&self,
pdu: &PduEvent,
@@ -184,7 +184,7 @@ pub async fn ask_policy_server(
/// Asks a remote policy server for a signature on this event.
/// If the policy server signs this event, the original data is mutated.
#[implement(super::Service)]
#[tracing::instrument(skip_all, fields(event_id=%pdu.event_id(), via=%via))]
#[tracing::instrument(skip_all, fields(event_id=%pdu.event_id(), via=%via), level = "info")]
pub async fn fetch_policy_server_signature(
&self,
pdu: &PduEvent,

View File

@@ -335,10 +335,11 @@ pub async fn append_pdu<'a, Leaves>(
if let Some(body) = content.body {
self.services.search.index_pdu(shortroomid, &pdu_id, &body);
if self.services.admin.is_admin_command(pdu, &body).await {
if let Some(source) = self.services.admin.is_admin_command(pdu, &body).await {
self.services.admin.command_with_sender(
body,
Some((pdu.event_id()).into()),
source,
pdu.sender.clone().into(),
)?;
}

View File

@@ -28,7 +28,7 @@ pub async fn redact_pdu<Pdu: Event + Send + Sync>(
.await
.map(Event::into_pdu)
.map_err(|e| {
err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU.")))
err!(Database(error!(?pdu_id, %event_id, ?e, "PDU ID points to invalid PDU.")))
})?;
if let Ok(content) = pdu.get_content::<ExtractBody>() {
@@ -48,7 +48,7 @@ pub async fn redact_pdu<Pdu: Event + Send + Sync>(
pdu.redact(&room_version_id, reason.to_value())?;
let obj = utils::to_canonical_object(&pdu).map_err(|e| {
err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON")))
err!(Database(error!(%event_id, ?e, "Failed to convert PDU to canonical JSON")))
})?;
self.replace_pdu(&pdu_id, &obj).await

View File

@@ -535,12 +535,12 @@ async fn select_edus_receipts_room(
}
let Ok(event) = serde_json::from_str(read_receipt.json().get()) else {
error!(?user_id, ?count, ?read_receipt, "Invalid edu event in read_receipts.");
error!(%user_id, %count, ?read_receipt, "Invalid edu event in read_receipts.");
continue;
};
let AnySyncEphemeralRoomEvent::Receipt(r) = event else {
error!(?user_id, ?count, ?event, "Invalid event type in read_receipts");
error!(%user_id, %count, ?event, "Invalid event type in read_receipts");
continue;
};
@@ -755,7 +755,7 @@ async fn send_events_dest_push(
let Ok(pusher) = self.services.pusher.get_pusher(&user_id, &pushkey).await else {
return Err((
Destination::Push(user_id.clone(), pushkey.clone()),
err!(Database(error!(?user_id, ?pushkey, "Missing pusher"))),
err!(Database(error!(%user_id, ?pushkey, "Missing pusher"))),
));
};

View File

@@ -118,7 +118,7 @@ pub async fn acquire_pubkeys<'a, S, K>(&self, batch: S)
}
for (server, key_ids) in missing {
debug_warn!(?server, ?key_ids, "missing");
debug_warn!(%server, ?key_ids, "missing");
}
}
@@ -174,8 +174,8 @@ async fn acquire_origin(
timeout: Instant,
) -> (OwnedServerName, Vec<OwnedServerSigningKeyId>) {
match timeout_at(timeout, self.server_request(&origin)).await {
| Err(e) => debug_warn!(?origin, "timed out: {e}"),
| Ok(Err(e)) => debug_error!(?origin, "{e}"),
| Err(e) => debug_warn!(%origin, "timed out: {e}"),
| Ok(Err(e)) => debug_error!(%origin, "{e}"),
| Ok(Ok(server_keys)) => {
trace!(
%origin,

View File

@@ -63,7 +63,7 @@ pub async fn get_pubkeys_for<'a, I>(&self, origin: &ServerName, key_ids: I) -> P
}
#[implement(super::Service)]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub async fn get_verify_key(
&self,
origin: &ServerName,
@@ -96,8 +96,8 @@ pub async fn get_verify_key(
}
Err!(BadServerResponse(debug_error!(
?key_id,
?origin,
%key_id,
%origin,
"Failed to fetch federation signing-key"
)))
}

View File

@@ -112,7 +112,7 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
}
#[implement(Service)]
#[tracing::instrument(skip(self, object))]
#[tracing::instrument(skip(self, object), level = "debug")]
pub async fn required_keys_exist(
&self,
object: &CanonicalJsonObject,
@@ -135,7 +135,7 @@ pub async fn required_keys_exist(
}
#[implement(Service)]
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>;

View File

@@ -494,8 +494,8 @@ pub async fn set_token(
let key = (user_id, device_id);
if self.db.userdeviceid_metadata.qry(&key).await.is_err() {
return Err!(Database(error!(
?user_id,
?device_id,
%user_id,
%device_id,
"User does not exist or device has no metadata."
)));
}
@@ -539,8 +539,8 @@ pub async fn add_one_time_key(
let key = (user_id, device_id);
if self.db.userdeviceid_metadata.qry(&key).await.is_err() {
return Err!(Database(error!(
?user_id,
?device_id,
%user_id,
%device_id,
"User does not exist or device has no metadata."
)));
}
@@ -1153,7 +1153,7 @@ pub async fn find_from_login_token(&self, token: &str) -> Result<OwnedUserId> {
let (expires_at, user_id): (u64, OwnedUserId) = value.deserialized()?;
if expires_at < utils::millis_since_unix_epoch() {
trace!(?user_id, ?token, "Removing expired login token");
trace!(%user_id, ?token, "Removing expired login token");
self.db.logintoken_expiresatuserid.remove(token);
@@ -1231,7 +1231,7 @@ pub async fn search_ldap(&self, user_id: &UserId) -> Result<Vec<(String, bool)>>
debug!(?uri, "LDAP creating connection...");
let (conn, mut ldap) = LdapConnAsync::new(uri.as_str())
.await
.map_err(|e| err!(Ldap(error!(?user_id, "LDAP connection setup error: {e}"))))?;
.map_err(|e| err!(Ldap(error!(%user_id, "LDAP connection setup error: {e}"))))?;
let driver = self.services.server.runtime().spawn(async move {
match conn.drive().await {