Compare commits

..

2 Commits

Author SHA1 Message Date
Jade Ellis
9d0c89bd04 fix(v12): Create tombstone event on room upgrade 2025-09-24 18:22:16 +00:00
nexy7574
965db4aa43 fix: V12 room upgrades 2025-09-24 18:22:16 +00:00
40 changed files with 880 additions and 985 deletions

View File

@@ -32,7 +32,7 @@ jobs:
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
uses: https://code.forgejo.org/actions/checkout@v4
with:
fetch-depth: 0
@@ -132,7 +132,7 @@ jobs:
path: ${{ steps.cargo-deb.outputs.path }}
- name: Publish to Forgejo package registry
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' || forge.event_name == 'schedule' }}
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' }}
run: |
OWNER="continuwuation"
DISTRIBUTION=${{ steps.debian-version.outputs.distribution }}

View File

@@ -30,7 +30,7 @@ jobs:
echo "Fedora version: $VERSION"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
uses: https://code.forgejo.org/actions/checkout@v4
with:
fetch-depth: 0
@@ -250,7 +250,7 @@ jobs:
path: artifacts/*debuginfo*.rpm
- name: Publish to RPM Package Registry
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find artifacts -name "continuwuity-*.rpm" \

View File

@@ -55,7 +55,7 @@ jobs:
- name: Setup Node.js
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: 22

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: "22"

View File

@@ -43,7 +43,7 @@ jobs:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:41.146.4@sha256:bb70194b7405faf10a6f279b60caa10403a440ba37d158c5a4ef0ae7b67a0f92
image: ghcr.io/renovatebot/renovate:41.127.2@sha256:66bc84e2f889025fbb3c9df863500dcc18bc64ac85bcf629d015064377d77f31
options: --tmpfs /tmp:exec
steps:
- name: Checkout

View File

@@ -7,7 +7,6 @@ on:
- "Cargo.lock"
- "Cargo.toml"
- "rust-toolchain.toml"
- ".forgejo/workflows/update-flake-hashes.yml"
jobs:
update-flake-hashes:
@@ -15,13 +14,13 @@ jobs:
steps:
- uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
fetch-depth: 1
fetch-tags: false
fetch-single-branch: true
submodules: false
persist-credentials: false
- uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
- uses: https://github.com/cachix/install-nix-action@a809471b5c7c913aa67bec8f459a11a0decc3fce # v31.6.2
with:
nix_path: nixpkgs=channel:nixos-unstable

1028
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -351,7 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
rev = "50b2a91b2ab8f9830eea80b9911e11234e0eac66"
rev = "d18823471ab3c09e77ff03eea346d4c07e572654"
features = [
"compat",
"rand",
@@ -551,9 +551,9 @@ features = ["std"]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.12.0"
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls", "rustls-provider"]
features = ["sync", "tls-rustls"]
[workspace.dependencies.resolv-conf]
version = "0.7.5"

View File

@@ -957,21 +957,6 @@
#
#rocksdb_bottommost_compression = true
# Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
#
# At present, only ZSTD compression is supported by RocksDB for WAL
# compression. Enabling this can reduce WAL size at the expense of some
# CPU usage during writes.
#
# The options are:
# - "none" = No compression
# - "zstd" = ZSTD compression
#
# For more information on WAL compression, see:
# https://github.com/facebook/rocksdb/wiki/WAL-Compression
#
#rocksdb_wal_compression = "zstd"
# Database recovery mode (for RocksDB WAL corruption).
#
# Use this option when the server reports corruption and refuses to start.
@@ -1512,19 +1497,6 @@
#
#block_non_admin_invites = false
# Enable or disable making requests to MSC4284 Policy Servers.
# It is recommended you keep this enabled unless you experience frequent
# connectivity issues, such as in a restricted networking environment.
#
#enable_msc4284_policy_servers = true
# Enable running locally generated events through configured MSC4284
# policy servers. You may wish to disable this if your server is
# single-user for a slight speed benefit in some rooms, but otherwise
# should leave it enabled.
#
#policy_server_check_own_events = true
# Allow admins to enter commands in rooms other than "#admins" (admin
# room) by prefixing your message with "\!admin" or "\\!admin" followed by
# a normal continuwuity admin command. The reply will be publicly visible

View File

@@ -48,7 +48,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.7
ENV BINSTALL_VERSION=1.15.5
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.7
ENV BINSTALL_VERSION=1.15.5
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

30
flake.lock generated
View File

@@ -10,11 +10,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1758711588,
"narHash": "sha256-0nZlCCDC5PfndsQJXXtcyrtrfW49I3KadGMDlutzaGU=",
"lastModified": 1757683818,
"narHash": "sha256-q7q0pWT+wu5AUU1Qlbwq8Mqb+AzHKhaMCVUq/HNZfo8=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "12cbeca141f46e1ade76728bce8adc447f2166c6",
"rev": "7c5d79ad62cda340cb8c80c99b921b7b7ffacf69",
"type": "github"
},
"original": {
@@ -99,11 +99,11 @@
},
"crane_2": {
"locked": {
"lastModified": 1759893430,
"narHash": "sha256-yAy4otLYm9iZ+NtQwTMEbqHwswSFUbhn7x826RR6djw=",
"lastModified": 1757183466,
"narHash": "sha256-kTdCCMuRE+/HNHES5JYsbRHmgtr+l9mOtf5dpcMppVc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "1979a2524cb8c801520bd94c38bb3d5692419d93",
"rev": "d599ae4847e7f87603e7082d73ca673aa93c916d",
"type": "github"
},
"original": {
@@ -152,11 +152,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1760510549,
"narHash": "sha256-NP+kmLMm7zSyv4Fufv+eSJXyqjLMUhUfPT6lXRlg/bU=",
"lastModified": 1758004879,
"narHash": "sha256-kV7tQzcNbmo58wg2uE2MQ/etaTx+PxBMHeNrLP8vOgk=",
"owner": "nix-community",
"repo": "fenix",
"rev": "ef7178cf086f267113b5c48fdeb6e510729c8214",
"rev": "07e5ce53dd020e6b337fdddc934561bee0698fa2",
"type": "github"
},
"original": {
@@ -455,11 +455,11 @@
},
"nixpkgs_3": {
"locked": {
"lastModified": 1760504863,
"narHash": "sha256-h13YFQMi91nXkkRoJMIfezorz5SbD6849jw5L0fjK4I=",
"lastModified": 1758029226,
"narHash": "sha256-TjqVmbpoCqWywY9xIZLTf6ANFvDCXdctCjoYuYPYdMI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "82c2e0d6dde50b17ae366d2aa36f224dc19af469",
"rev": "08b8f92ac6354983f5382124fef6006cade4a1c1",
"type": "github"
},
"original": {
@@ -484,11 +484,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1760457219,
"narHash": "sha256-WJOUGx42hrhmvvYcGkwea+BcJuQJLcns849OnewQqX4=",
"lastModified": 1757362324,
"narHash": "sha256-/PAhxheUq4WBrW5i/JHzcCqK5fGWwLKdH6/Lu1tyS18=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "8747cf81540bd1bbbab9ee2702f12c33aa887b46",
"rev": "9edc9cbe5d8e832b5864e09854fa94861697d2fd",
"type": "github"
},
"original": {

View File

@@ -12,14 +12,13 @@ Group=conduwuit
Type=notify-reload
ReloadSignal=SIGUSR1
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
ExecStart=/usr/bin/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
@@ -53,9 +52,8 @@ SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
# ConfigurationDirectory isn't specified here because it's created by
# the distro's package manager.
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

View File

@@ -51,7 +51,7 @@ find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
@@ -60,7 +60,7 @@ install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/con
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config(noreplace) %{_sysconfdir}/conduwuit/conduwuit.toml
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service

View File

@@ -64,8 +64,12 @@
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/renovatebot/renovate"],
"automerge": true,
"automergeStrategy": "fast-forward",
"extends": ["schedule:earlyMondays"]
"automergeStrategy": "fast-forward"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
}
],
"customManagers": [
@@ -77,7 +81,7 @@
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s+(?:(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_CHECKSUM[ =][\"']?(?<currentDigest>.+?)[\"']?\\s)?"
"# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV|ARG)\\s+[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s"
]
}
]

View File

@@ -64,14 +64,10 @@ pub(crate) async fn create_content_route(
media_id: &utils::random_string(MXC_LENGTH),
};
if let Err(e) = services
services
.media
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await
{
err!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}
.await?;
let blurhash = body.generate_blurhash.then(|| {
services

View File

@@ -97,12 +97,11 @@ pub(crate) async fn upgrade_room_route(
// Create a replacement room
let room_features = RoomVersion::new(&body.new_version)?;
let replacement_room_owned = if !room_features.room_ids_as_hashes {
Some(RoomId::new(services.globals.server_name()))
} else {
let replacement_room: Option<&RoomId> = if room_features.room_ids_as_hashes {
None
} else {
Some(&RoomId::new(services.globals.server_name()))
};
let replacement_room: Option<&RoomId> = replacement_room_owned.as_ref().map(AsRef::as_ref);
let replacement_room_tmp = match replacement_room {
| Some(v) => v,
| None => &RoomId::new(services.globals.server_name()),

View File

@@ -320,7 +320,6 @@ async fn handle_lists<'a, Rooms, AllRooms>(
for mut range in ranges {
range.0 = uint!(0);
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
range.1 = range
.1
.clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));

View File

@@ -226,7 +226,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&server::well_known_server)
.ruma_route(&server::get_content_route)
.ruma_route(&server::get_content_thumbnail_route)
.ruma_route(&server::get_edutypes_route)
.route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count))
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
} else {

View File

@@ -34,19 +34,6 @@ pub(super) async fn from(
let max_body_size = services.server.config.max_request_size;
// Check if the Content-Length header is present and valid, saves us streaming
// the response into memory
if let Some(content_length) = parts.headers.get(http::header::CONTENT_LENGTH) {
if let Ok(content_length) = content_length
.to_str()
.map(|s| s.parse::<usize>().unwrap_or_default())
{
if content_length > max_body_size {
return Err(err!(Request(TooLarge("Request body too large"))));
}
}
}
let body = axum::body::to_bytes(body, max_body_size)
.await
.map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?;

View File

@@ -1,19 +0,0 @@
use axum::extract::State;
use conduwuit::Result;
use ruma::api::federation::edutypes::get_edutypes;
use crate::Ruma;
/// # `GET /_matrix/federation/v1/edutypes`
///
/// Lists EDU types we wish to receive
pub(crate) async fn get_edutypes_route(
State(services): State<crate::State>,
_body: Ruma<get_edutypes::unstable::Request>,
) -> Result<get_edutypes::unstable::Response> {
Ok(get_edutypes::unstable::Response {
typing: services.config.allow_incoming_typing,
presence: services.config.allow_incoming_presence,
receipt: services.config.allow_incoming_read_receipts,
})
}

View File

@@ -1,5 +1,4 @@
pub(super) mod backfill;
pub(super) mod edutypes;
pub(super) mod event;
pub(super) mod event_auth;
pub(super) mod get_missing_events;
@@ -24,7 +23,6 @@
pub(super) mod well_known;
pub(super) use backfill::*;
pub(super) use edutypes::*;
pub(super) use event::*;
pub(super) use event_auth::*;
pub(super) use get_missing_events::*;

View File

@@ -1128,23 +1128,6 @@ pub struct Config {
#[serde(default = "true_fn")]
pub rocksdb_bottommost_compression: bool,
/// Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
///
/// At present, only ZSTD compression is supported by RocksDB for WAL
/// compression. Enabling this can reduce WAL size at the expense of some
/// CPU usage during writes.
///
/// The options are:
/// - "none" = No compression
/// - "zstd" = ZSTD compression
///
/// For more information on WAL compression, see:
/// https://github.com/facebook/rocksdb/wiki/WAL-Compression
///
/// default: "zstd"
#[serde(default = "default_rocksdb_wal_compression")]
pub rocksdb_wal_compression: String,
/// Database recovery mode (for RocksDB WAL corruption).
///
/// Use this option when the server reports corruption and refuses to start.
@@ -1727,19 +1710,6 @@ pub struct Config {
#[serde(default)]
pub block_non_admin_invites: bool,
/// Enable or disable making requests to MSC4284 Policy Servers.
/// It is recommended you keep this enabled unless you experience frequent
/// connectivity issues, such as in a restricted networking environment.
#[serde(default = "true_fn")]
pub enable_msc4284_policy_servers: bool,
/// Enable running locally generated events through configured MSC4284
/// policy servers. You may wish to disable this if your server is
/// single-user for a slight speed benefit in some rooms, but otherwise
/// should leave it enabled.
#[serde(default = "true_fn")]
pub policy_server_check_own_events: bool,
/// Allow admins to enter commands in rooms other than "#admins" (admin
/// room) by prefixing your message with "\!admin" or "\\!admin" followed by
/// a normal continuwuity admin command. The reply will be publicly visible
@@ -2471,8 +2441,6 @@ fn default_rocksdb_compression_algo() -> String {
.to_owned()
}
fn default_rocksdb_wal_compression() -> String { "zstd".to_owned() }
/// Default RocksDB compression level is 32767, which is internally read by
/// RocksDB as the default magic number and translated to the library's default
/// compression level as they all differ. See their `kDefaultCompressionLevel`.

View File

@@ -5,17 +5,13 @@
use std::{collections::BTreeMap, sync::OnceLock};
use crate::utils::exchange;
use crate::{SyncMutex, utils::exchange};
/// Raw capture of rustc flags used to build each crate in the project. Informed
/// by rustc_flags_capture macro (one in each crate's mod.rs). This is
/// done during static initialization which is why it's mutex-protected and pub.
/// Should not be written to by anything other than our macro.
///
/// We specifically use a std mutex here because parking_lot cannot be used
/// after thread local storage is destroyed on MacOS.
pub static FLAGS: std::sync::Mutex<BTreeMap<&str, &[&str]>> =
std::sync::Mutex::new(BTreeMap::new());
pub static FLAGS: SyncMutex<BTreeMap<&str, &[&str]>> = SyncMutex::new(BTreeMap::new());
/// Processed list of enabled features across all project crates. This is
/// generated from the data in FLAGS.
@@ -28,7 +24,6 @@ fn init_features() -> Vec<&'static str> {
let mut features = Vec::new();
FLAGS
.lock()
.expect("locked")
.iter()
.for_each(|(_, flags)| append_features(&mut features, flags));

View File

@@ -200,15 +200,11 @@ pub async fn auth_check<E, F, Fut>(
if incoming_event.room_id().is_some() {
let Some(room_id_server_name) = incoming_event.room_id().unwrap().server_name()
else {
warn!("legacy room ID has no server name");
warn!("room ID has no servername");
return Ok(false);
};
if room_id_server_name != sender.server_name() {
warn!(
expected = %sender.server_name(),
received = %room_id_server_name,
"server name of legacy room ID does not match server name of sender"
);
warn!("servername of room ID does not match servername of sender");
return Ok(false);
}
}
@@ -219,12 +215,12 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!("unsupported room version found in m.room.create event");
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
if room_version.room_ids_as_hashes && incoming_event.room_id().is_some() {
warn!("room create event incorrectly claims to have a room ID when it should not");
warn!("room create event incorrectly claims a room ID");
return Ok(false);
}
@@ -233,7 +229,7 @@ pub async fn auth_check<E, F, Fut>(
{
// If content has no creator field, reject
if content.creator.is_none() {
warn!("m.room.create event incorrectly omits 'creator' field");
warn!("no creator field found in m.room.create content");
return Ok(false);
}
}
@@ -286,19 +282,16 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!(
create_event_id = %room_create_event.event_id(),
"unsupported room version found in m.room.create event"
);
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
let expected_room_id = room_create_event.room_id_or_hash();
if incoming_event.room_id().expect("event must have a room ID") != expected_room_id {
if incoming_event.room_id().unwrap() != expected_room_id {
warn!(
expected = %expected_room_id,
received = %incoming_event.room_id().unwrap(),
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
incoming_event.room_id().unwrap(),
expected_room_id,
);
@@ -311,15 +304,12 @@ pub async fn auth_check<E, F, Fut>(
.auth_events()
.any(|id| id == room_create_event.event_id());
if room_version.room_ids_as_hashes && claims_create_event {
warn!("event incorrectly references m.room.create event in auth events");
warn!("m.room.create event incorrectly found in auth events");
return Ok(false);
} else if !room_version.room_ids_as_hashes && !claims_create_event {
// If the create event is not referenced in the event's auth events, and this is
// a v11 room, reject
warn!(
missing = %room_create_event.event_id(),
"event incorrectly did not reference an m.room.create in its auth events"
);
warn!("no m.room.create event found in auth events");
return Ok(false);
}
@@ -328,7 +318,7 @@ pub async fn auth_check<E, F, Fut>(
warn!(
expected = %expected_room_id,
received = %pe.room_id().unwrap(),
"room_id of referenced power levels event does not match that of the m.room.create event"
"room_id of power levels event does not match room_id of m.room.create event"
);
return Ok(false);
}
@@ -342,9 +332,8 @@ pub async fn auth_check<E, F, Fut>(
&& room_create_event.sender().server_name() != incoming_event.sender().server_name()
{
warn!(
sender = %incoming_event.sender(),
create_sender = %room_create_event.sender(),
"room is not federated and event's sender domain does not match create event's sender domain"
"room is not federated and event's sender domain does not match create event's \
sender domain"
);
return Ok(false);
}
@@ -427,6 +416,7 @@ pub async fn auth_check<E, F, Fut>(
&user_for_join_auth_membership,
&room_create_event,
)? {
warn!("membership change not valid for some reason");
return Ok(false);
}
@@ -439,7 +429,7 @@ pub async fn auth_check<E, F, Fut>(
let sender_member_event = match sender_member_event {
| Some(mem) => mem,
| None => {
warn!("sender has no membership event");
warn!("sender not found in room");
return Ok(false);
},
};
@@ -450,7 +440,7 @@ pub async fn auth_check<E, F, Fut>(
!= expected_room_id
{
warn!(
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
sender_member_event
.room_id()
.expect("event must have a room ID"),
@@ -463,7 +453,8 @@ pub async fn auth_check<E, F, Fut>(
from_json_str(sender_member_event.content().get())?;
let Some(membership_state) = sender_membership_event_content.membership else {
warn!(
?sender_membership_event_content,
sender_membership_event_content = format!("{sender_membership_event_content:?}"),
event_id = format!("{}", incoming_event.event_id()),
"Sender membership event content missing membership field"
);
return Err(Error::InvalidPdu("Missing membership field".to_owned()));
@@ -471,11 +462,7 @@ pub async fn auth_check<E, F, Fut>(
let membership_state = membership_state.deserialize()?;
if !matches!(membership_state, MembershipState::Join) {
warn!(
%sender,
?membership_state,
"sender cannot send events without being joined to the room"
);
warn!("sender's membership is not join");
return Ok(false);
}
@@ -535,12 +522,7 @@ pub async fn auth_check<E, F, Fut>(
};
if sender_power_level < invite_level {
warn!(
%sender,
has=?sender_power_level,
required=?invite_level,
"sender cannot send invites in this room"
);
warn!("sender's cannot send invites in this room");
return Ok(false);
}
@@ -552,11 +534,7 @@ pub async fn auth_check<E, F, Fut>(
// level, reject If the event has a state_key that starts with an @ and does
// not match the sender, reject.
if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) {
warn!(
%sender,
event_type=?incoming_event.kind(),
"sender cannot send event"
);
warn!("user cannot send event");
return Ok(false);
}
@@ -601,12 +579,6 @@ pub async fn auth_check<E, F, Fut>(
};
if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? {
warn!(
%sender,
?sender_power_level,
?redact_level,
"redaction event was not allowed"
);
return Ok(false);
}
}
@@ -787,7 +759,7 @@ struct GetThirdPartyInvite {
if prev_event_is_create_event && no_more_prev_events {
trace!(
%sender,
sender = %sender,
target_user = %target_user,
?sender_creator,
?target_creator,
@@ -807,33 +779,22 @@ struct GetThirdPartyInvite {
);
if sender != target_user {
// If the sender does not match state_key, reject.
warn!(
%sender,
target_user = %target_user,
"sender cannot join on behalf of another user"
);
warn!("Can't make other user join");
false
} else if target_user_current_membership == MembershipState::Ban {
// If the sender is banned, reject.
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
"sender cannot join as they are banned from the room"
);
warn!(?target_user_membership_event_id, "Banned user can't join");
false
} else {
match join_rules {
| JoinRule::Invite =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership = ?target_user_current_membership,
"sender cannot join as they are not invited to the invite-only room"
membership=?target_user_current_membership,
"Join rule is invite but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited to room, allowing join");
true
},
| JoinRule::Knock if !room_version.allow_knocking => {
@@ -843,14 +804,11 @@ struct GetThirdPartyInvite {
| JoinRule::Knock =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
"sender cannot join a knock room without being invited or already joined"
"Join rule is knock but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited or already joined to room, allowing join");
true
},
| JoinRule::KnockRestricted(_) if !room_version.knock_restricted_join_rule =>
@@ -862,55 +820,33 @@ struct GetThirdPartyInvite {
},
| JoinRule::KnockRestricted(_) => {
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
true
} else {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one, but no valid authorising user \
was given and the sender's current membership does not permit \
a join transition"
);
false
}
},
| JoinRule::Restricted(_) =>
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
true
} else {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one but no valid authorising user \
was given"
);
false
},
| JoinRule::Public => {
trace!(%sender, "join rule is public, allowing join");
true
},
| JoinRule::Public => true,
| _ => {
warn!(
join_rule=?join_rules,
"Join rule is unknown, or the rule's conditions were not met"
membership=?target_user_current_membership,
"Unknown join rule doesn't allow joining, or the rule's conditions were not met"
);
false
},
@@ -937,23 +873,16 @@ struct GetThirdPartyInvite {
}
allow
},
| _ =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
?sender_membership,
"sender cannot produce an invite without being joined to the room",
);
false
} else if matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Ban
) {
| _ => {
if !sender_is_joined
|| target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Ban
{
warn!(
?target_user_membership_event_id,
?target_user_current_membership,
"cannot invite a user who is banned or already joined",
?sender_membership_event_id,
"Can't invite user if sender not joined or the user is currently \
joined or banned",
);
false
} else {
@@ -963,107 +892,56 @@ struct GetThirdPartyInvite {
.is_some();
if !allow {
warn!(
%sender,
has=?sender_power,
required=?power_levels.invite,
"sender does not have enough power to produce invites",
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to invite",
);
}
trace!(
%sender,
?sender_membership_event_id,
?sender_membership,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
required_pl=?power_levels.invite,
"allowing invite"
);
allow
},
}
},
}
},
| MembershipState::Leave => {
let can_unban = if target_user_current_membership == MembershipState::Ban {
sender_creator || sender_power.filter(|&p| p < &power_levels.ban).is_some()
} else {
true
};
let can_kick = if !matches!(
target_user_current_membership,
MembershipState::Ban | MembershipState::Leave
) {
sender_creator || sender_power.filter(|&p| p < &power_levels.kick).is_some()
} else {
true
};
| MembershipState::Leave =>
if sender == target_user {
// self-leave
// let allow = target_user_current_membership == MembershipState::Join
// || target_user_current_membership == MembershipState::Invite
// || target_user_current_membership == MembershipState::Knock;
let allow = matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Invite | MembershipState::Knock
);
let allow = target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite
|| target_user_current_membership == MembershipState::Knock;
if !allow {
warn!(
%sender,
current_membership_event_id=?target_user_membership_event_id,
current_membership=?target_user_current_membership,
"sender cannot leave as they are not already knocking on, invited to, or joined to the room"
?target_user_membership_event_id,
?target_user_current_membership,
"Can't leave if sender is not already invited, knocked, or joined"
);
}
trace!(sender=%sender, "allowing leave");
allow
} else if !sender_is_joined {
} else if !sender_is_joined
|| target_user_current_membership == MembershipState::Ban
&& (sender_creator
|| sender_power.filter(|&p| p < &power_levels.ban).is_some())
{
warn!(
%sender,
?target_user_membership_event_id,
?sender_membership_event_id,
"sender cannot kick another user as they are not joined to the room",
);
false
} else if !can_unban {
// If the target is banned, only a room creator or someone with ban power
// level can unban them
warn!(
%sender,
?target_user_membership_event_id,
?power_levels_event_id,
"sender lacks the power level required to unban users",
);
false
} else if !can_kick {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
?power_levels_event_id,
"sender does not have enough power to kick the target",
"Can't kick if sender not joined or user is already banned",
);
false
} else {
trace!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
target_pl=?target_power,
required_pl=?power_levels.kick,
"allowing kick/unban",
);
true
}
},
let allow = sender_creator
|| (sender_power.filter(|&p| p >= &power_levels.kick).is_some()
&& target_power < sender_power);
if !allow {
warn!(
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to kick",
);
}
allow
},
| MembershipState::Ban =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
"sender cannot ban another user as they are not joined to the room",
);
warn!(?sender_membership_event_id, "Can't ban user if sender is not joined");
false
} else {
let allow = sender_creator
@@ -1071,11 +949,9 @@ struct GetThirdPartyInvite {
&& target_power < sender_power);
if !allow {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?power_levels_event_id,
"sender does not have enough power to ban the target",
"User does not have enough power to ban",
);
}
allow
@@ -1101,9 +977,9 @@ struct GetThirdPartyInvite {
} else if sender != target_user {
// 3. If `sender` does not match `state_key`, reject.
warn!(
%sender,
%target_user,
"sender cannot knock on behalf of another user",
?sender,
?target_user,
"Can't make another user knock, sender did not match target"
);
false
} else if matches!(
@@ -1115,25 +991,15 @@ struct GetThirdPartyInvite {
// 5. Otherwise, reject.
warn!(
?target_user_membership_event_id,
?sender_membership,
"Knocking with a membership state of ban, invite or join is invalid",
);
false
} else {
trace!(%sender, "allowing knock");
true
}
},
| _ => {
warn!(
%sender,
?target_membership,
%target_user,
%target_user_current_membership,
"Unknown or invalid membership transition {} -> {}",
target_user_current_membership,
target_membership
);
warn!("Unknown membership transition");
false
},
})
@@ -1163,13 +1029,6 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
if event.state_key().is_some_and(|k| k.starts_with('@'))
&& event.state_key() != Some(event.sender().as_str())
{
warn!(
%user_level,
required=?event_type_power_level,
state_key=?event.state_key(),
sender=%event.sender(),
"state_key starts with @ but does not match sender",
);
return false; // permission required to post in this room
}
@@ -1254,14 +1113,7 @@ fn check_power_levels(
// If the current value is equal to the sender's current power level, reject
if user != power_event.sender() && old_level == Some(&user_level) {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with the same power level as sender's own"
);
warn!("m.room.power_level cannot remove ops == to own");
return Some(false); // cannot remove ops level == to own
}
@@ -1269,26 +1121,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of a user to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1305,26 +1139,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of an event with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of an event to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1339,13 +1155,7 @@ fn check_power_levels(
let old_level_too_big = old_level > user_level;
let new_level_too_big = new_level > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_level,
?new_level,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of notifications greater than sender's own"
);
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1369,14 +1179,7 @@ fn check_power_levels(
let new_level_too_big = new_lvl > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_lvl,
?new_lvl,
%user_level,
sender=%power_event.sender(),
action=%lvl_name,
"cannot alter the power level of action greater than sender's own",
);
warn!("cannot add ops > than own");
return Some(false);
}
}

View File

@@ -36,7 +36,7 @@
room_version::RoomVersion,
};
use crate::{
debug, debug_error, err,
debug, debug_error,
matrix::{Event, StateKey},
state_res::room_version::StateResolutionVersion,
trace,
@@ -319,19 +319,8 @@ async fn calculate_conflicted_subgraph<F, Fut, E>(
path.pop();
continue;
}
trace!(event_id = event_id.as_str(), "fetching event for its auth events");
let evt = fetch_event(event_id.clone()).await;
if evt.is_none() {
err!("could not fetch event {} to calculate conflicted subgraph", event_id);
path.pop();
continue;
}
stack.push(
evt.expect("checked")
.auth_events()
.map(ToOwned::to_owned)
.collect(),
);
let evt = fetch_event(event_id.clone()).await?;
stack.push(evt.auth_events().map(ToOwned::to_owned).collect());
seen.insert(event_id);
}
Some(subgraph)

View File

@@ -1,9 +1,7 @@
use std::{cmp, convert::TryFrom};
use conduwuit::{Config, Result, utils, warn};
use rocksdb::{
Cache, DBCompressionType, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel,
};
use conduwuit::{Config, Result, utils};
use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
@@ -60,20 +58,6 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
opts.set_max_total_wal_size(1024 * 1024 * 512);
opts.set_writable_file_max_buffer_size(1024 * 1024 * 2);
// WAL compression
let wal_compression = match config.rocksdb_wal_compression.as_ref() {
| "zstd" => DBCompressionType::Zstd,
| "none" => DBCompressionType::None,
| value => {
warn!(
"Invalid rocksdb_wal_compression value '{value}'. Supported values are 'none' \
or 'zstd'. Defaulting to 'none'."
);
DBCompressionType::None
},
};
opts.set_wal_compression_type(wal_compression);
// Misc
opts.set_disable_auto_compactions(!config.rocksdb_compaction);
opts.create_missing_column_families(true);

View File

@@ -15,13 +15,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
#[ctor]
fn _set_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS);
conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
}
// static strings have to be yanked on module unload
#[dtor]
fn _unset_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name);
conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
}
};

View File

@@ -156,7 +156,6 @@ sentry_telemetry = [
]
systemd = [
"conduwuit-router/systemd",
"conduwuit-service/systemd"
]
journald = [ # This is a stub on non-unix platforms
"dep:tracing-journald",

View File

@@ -40,6 +40,7 @@ io_uring = [
"conduwuit-admin/io_uring",
"conduwuit-api/io_uring",
"conduwuit-service/io_uring",
"conduwuit-api/io_uring",
]
jemalloc = [
"conduwuit-admin/jemalloc",

View File

@@ -65,7 +65,7 @@ pub(crate) async fn start(server: Arc<Server>) -> Result<Arc<Services>> {
let services = Services::build(server).await?.start().await?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("failed to notify systemd of ready state");
debug!("Started");
@@ -78,7 +78,7 @@ pub(crate) async fn stop(services: Arc<Services>) -> Result<()> {
debug!("Shutting down...");
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping])
sd_notify::notify(true, &[sd_notify::NotifyState::Stopping])
.expect("failed to notify systemd of stopping state");
// Wait for all completions before dropping or we'll lose them to the module

View File

@@ -67,9 +67,6 @@ release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
systemd = [
"dep:sd-notify",
]
url_preview = [
"dep:image",
"dep:webpage",
@@ -122,9 +119,5 @@ blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
ctor.workspace = true
[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
sd-notify.workspace = true
sd-notify.optional = true
[lints]
workspace = true

View File

@@ -45,16 +45,13 @@ fn deref(&self) -> &Self::Target { &self.server.config }
fn handle_reload(&self) -> Result {
if self.server.config.config_reload_signal {
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[
sd_notify::NotifyState::Reloading,
sd_notify::NotifyState::monotonic_usec_now().expect("Failed to read monotonic time"),
])
.expect("failed to notify systemd of reloading state");
sd_notify::notify(true, &[sd_notify::NotifyState::Reloading])
.expect("failed to notify systemd of reloading state");
self.reload(iter::empty())?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("failed to notify systemd of ready state");
}

View File

@@ -90,22 +90,17 @@ pub async fn create(
file: &[u8],
) -> Result<()> {
// Width, Height = 0 if it's not a thumbnail
let key = self
.db
.create_file_metadata(mxc, user, &Dim::default(), content_disposition, content_type)
.map_err(|e| {
err!(Database(error!("Failed to create media metadata for MXC {mxc}: {e}")))
})?;
let key = self.db.create_file_metadata(
mxc,
user,
&Dim::default(),
content_disposition,
content_type,
)?;
//TODO: Dangling metadata in database if creation fails
let mut f = self.create_media_file(&key).await.map_err(|e| {
err!(Database(error!(
"Failed to create media file for MXC {mxc} at key {key:?}: {e}"
)))
})?;
f.write_all(file).await.map_err(|e| {
err!(Database(error!("Failed to write media file for MXC {mxc} at key {key:?}: {e}")))
})?;
let mut f = self.create_media_file(&key).await?;
f.write_all(file).await?;
Ok(())
}

View File

@@ -9,7 +9,6 @@
},
warn,
};
use database::Json;
use futures::{FutureExt, StreamExt, TryStreamExt};
use itertools::Itertools;
use ruma::{
@@ -607,7 +606,7 @@ async fn fix_corrupt_msc4133_fields(services: &Services) -> Result {
);
};
useridprofilekey_value.put((user, key), Json(new_value));
useridprofilekey_value.put((user, key), new_value);
fixed = fixed.saturating_add(1);
}
total = total.saturating_add(1);

View File

@@ -4,8 +4,9 @@
};
use conduwuit::{
Event, PduEvent, debug, debug_warn, implement, matrix::event::gen_event_id_canonical_json,
trace, utils::continue_exponential_backoff_secs, warn,
Event, PduEvent, debug, debug_error, debug_warn, implement,
matrix::event::gen_event_id_canonical_json, trace, utils::continue_exponential_backoff_secs,
warn,
};
use ruma::{
CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName,
@@ -51,14 +52,12 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
};
let mut events_with_auth_events = Vec::with_capacity(events.clone().count());
trace!("Fetching {} outlier pdus", events.clone().count());
for id in events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await {
trace!("Found {id} in main timeline or outlier tree");
events_with_auth_events.push((id.to_owned(), Some(local_pdu), vec![]));
continue;
}
@@ -105,7 +104,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
continue;
}
debug!("Fetching {next_id} over federation from {origin}.");
debug!("Fetching {next_id} over federation.");
match self
.services
.sending
@@ -116,7 +115,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
.await
{
| Ok(res) => {
debug!("Got {next_id} over federation from {origin}");
debug!("Got {next_id} over federation");
let Ok(room_version_id) = get_room_version_id(create_event) else {
back_off((*next_id).to_owned());
continue;
@@ -146,9 +145,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
auth_event.clone().into(),
) {
| Ok(auth_event) => {
trace!(
"Found auth event id {auth_event} for event {next_id}"
);
todo_auth_events.push_back(auth_event);
},
| _ => {
@@ -164,7 +160,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
events_all.insert(next_id);
},
| Err(e) => {
warn!("Failed to fetch auth event {next_id} from {origin}: {e}");
debug_error!("Failed to fetch event {next_id}: {e}");
back_off((*next_id).to_owned());
},
}
@@ -179,7 +175,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Some(local_pdu) = local_pdu {
trace!("Found {id} in main timeline or outlier tree");
trace!("Found {id} in db");
pdus.push((local_pdu.clone(), None));
}
@@ -205,7 +201,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
}
}
trace!("Handling outlier {next_id}");
match Box::pin(self.handle_outlier_pdu(
origin,
create_event,
@@ -218,7 +213,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
{
| Ok((pdu, json)) =>
if next_id == *id {
trace!("Handled outlier {next_id} (original request)");
pdus.push((pdu, Some(json)));
},
| Err(e) => {
@@ -228,6 +222,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
}
}
}
trace!("Fetched and handled {} outlier pdus", pdus.len());
pdus
}

View File

@@ -1,12 +1,11 @@
use std::collections::{BTreeMap, HashMap, hash_map};
use conduwuit::{
Err, Event, PduEvent, Result, debug, debug_info, debug_warn, err, implement, state_res, trace,
Err, Event, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn,
};
use futures::future::ready;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName,
events::StateEventType,
CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, events::StateEventType,
};
use super::{check_room_id, get_room_version_id, to_room_version};
@@ -75,73 +74,36 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
check_room_id(room_id, &pdu_event)?;
// Fetch all auth events
let mut auth_events: HashMap<OwnedEventId, PduEvent> = HashMap::new();
for aid in pdu_event.auth_events() {
if let Ok(auth_event) = self.services.timeline.get_pdu(aid).await {
check_room_id(room_id, &auth_event)?;
trace!("Found auth event {aid} for outlier event {event_id} locally");
auth_events.insert(aid.to_owned(), auth_event);
} else {
debug_warn!("Could not find auth event {aid} for outlier event {event_id} locally");
}
}
// Fetch any missing ones & reject invalid ones
let missing_auth_events = if auth_events_known {
pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>()
} else {
pdu_event.auth_events().collect::<Vec<_>>()
};
if !missing_auth_events.is_empty() || !auth_events_known {
debug_info!(
"Fetching {} missing auth events for outlier event {event_id}",
missing_auth_events.len()
);
for (pdu, _) in self
.fetch_and_handle_outliers(
origin,
missing_auth_events.iter().copied(),
create_event,
room_id,
)
.await
{
auth_events.insert(pdu.event_id().to_owned(), pdu);
}
} else {
debug!("No missing auth events for outlier event {event_id}");
}
// reject if we are still missing some
let still_missing = pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>();
if !still_missing.is_empty() {
return Err!(Request(InvalidParam(
"Could not fetch all auth events for outlier event {event_id}, still missing: \
{still_missing:?}"
)));
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1.
// These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of
// the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events");
Box::pin(self.fetch_and_handle_outliers(
origin,
pdu_event.auth_events(),
create_event,
room_id,
))
.await;
}
// 6. Reject "due to auth events" if the event doesn't pass auth based on the
// auth events
debug!("Checking based on auth events");
let mut auth_events_by_key: HashMap<_, _> = HashMap::with_capacity(auth_events.len());
// Build map of auth events
let mut auth_events = HashMap::with_capacity(pdu_event.auth_events().count());
for id in pdu_event.auth_events() {
let auth_event = auth_events
.get(id)
.expect("we just checked that we have all auth events")
.to_owned();
let Ok(auth_event) = self.services.timeline.get_pdu(id).await else {
warn!("Could not find auth event {id}");
continue;
};
check_room_id(room_id, &auth_event)?;
match auth_events_by_key.entry((
match auth_events.entry((
auth_event.kind.to_string().into(),
auth_event
.state_key
@@ -161,7 +123,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
// The original create event must be in the auth events
if !matches!(
auth_events_by_key.get(&(StateEventType::RoomCreate, String::new().into())),
auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
Some(_) | None
) {
return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
@@ -169,7 +131,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
let state_fetch = |ty: &StateEventType, sk: &str| {
let key = (ty.to_owned(), sk.into());
ready(auth_events_by_key.get(&key).map(ToOwned::to_owned))
ready(auth_events.get(&key).map(ToOwned::to_owned))
};
let auth_check = state_res::event_auth::auth_check(

View File

@@ -5,9 +5,9 @@
use std::time::Duration;
use conduwuit::{Err, Event, PduEvent, Result, debug, debug_info, implement, trace, warn};
use conduwuit::{Err, Event, PduEvent, Result, debug, implement, warn};
use ruma::{
CanonicalJsonObject, RoomId, ServerName,
RoomId, ServerName,
api::federation::room::policy::v1::Request as PolicyRequest,
events::{StateEventType, room::policy::RoomPolicyEventContent},
};
@@ -25,25 +25,7 @@
/// fail-open operation.
#[implement(super::Service)]
#[tracing::instrument(skip_all, level = "debug")]
pub async fn ask_policy_server(
&self,
pdu: &PduEvent,
pdu_json: &CanonicalJsonObject,
room_id: &RoomId,
) -> Result<bool> {
if !self.services.server.config.enable_msc4284_policy_servers {
return Ok(true); // don't ever contact policy servers
}
if self.services.server.config.policy_server_check_own_events
&& pdu.origin.is_some()
&& self
.services
.server
.is_ours(pdu.origin.as_ref().unwrap().as_str())
{
return Ok(true); // don't contact policy servers for locally generated events
}
pub async fn ask_policy_server(&self, pdu: &PduEvent, room_id: &RoomId) -> Result<bool> {
if *pdu.event_type() == StateEventType::RoomPolicy.into() {
debug!(
room_id = %room_id,
@@ -65,12 +47,12 @@ pub async fn ask_policy_server(
let via = match policyserver.via {
| Some(ref via) => ServerName::parse(via)?,
| None => {
trace!("No policy server configured for room {room_id}");
debug!("No policy server configured for room {room_id}");
return Ok(true);
},
};
if via.is_empty() {
trace!("Policy server is empty for room {room_id}, skipping spam check");
debug!("Policy server is empty for room {room_id}, skipping spam check");
return Ok(true);
}
if !self.services.state_cache.server_in_room(via, room_id).await {
@@ -84,12 +66,12 @@ pub async fn ask_policy_server(
let outgoing = self
.services
.sending
.convert_to_outgoing_federation_event(pdu_json.clone())
.convert_to_outgoing_federation_event(pdu.to_canonical_object())
.await;
debug_info!(
debug!(
room_id = %room_id,
via = %via,
outgoing = ?pdu_json,
outgoing = ?outgoing,
"Checking event for spam with policy server"
);
let response = tokio::time::timeout(
@@ -103,10 +85,7 @@ pub async fn ask_policy_server(
)
.await;
let response = match response {
| Ok(Ok(response)) => {
debug!("Response from policy server: {:?}", response);
response
},
| Ok(Ok(response)) => response,
| Ok(Err(e)) => {
warn!(
via = %via,
@@ -118,18 +97,16 @@ pub async fn ask_policy_server(
// default.
return Err(e);
},
| Err(elapsed) => {
| Err(_) => {
warn!(
%via,
via = %via,
event_id = %pdu.event_id(),
%room_id,
%elapsed,
room_id = %room_id,
"Policy server request timed out after 10 seconds"
);
return Err!("Request to policy server timed out");
},
};
trace!("Recommendation from policy server was {}", response.recommendation);
if response.recommendation == "spam" {
warn!(
via = %via,

View File

@@ -255,10 +255,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
// 14-pre. If the event is not a state event, ask the policy server about it
if incoming_pdu.state_key.is_none() {
debug!(event_id = %incoming_pdu.event_id, "Checking policy server for event");
match self
.ask_policy_server(&incoming_pdu, &incoming_pdu.to_canonical_object(), room_id)
.await
{
match self.ask_policy_server(&incoming_pdu, room_id).await {
| Ok(false) => {
warn!(
event_id = %incoming_pdu.event_id,

View File

@@ -9,7 +9,6 @@
state_res::{self, RoomVersion},
},
utils::{self, IterStream, ReadyExt, stream::TryIgnore},
warn,
};
use futures::{StreamExt, TryStreamExt, future, future::ready};
use ruma::{
@@ -20,6 +19,7 @@
uint,
};
use serde_json::value::{RawValue, to_raw_value};
use tracing::warn;
use super::RoomMutexGuard;
@@ -267,19 +267,23 @@ fn from_evt(
| _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))),
};
}
// Generate event id
pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?;
// Check with the policy server
pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
// Check with the policy server
if room_id.is_some() {
trace!(
"Checking event in room {} with policy server",
"Checking event {} in room {} with policy server",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
match self
.services
.event_handler
.ask_policy_server(&pdu, &pdu_json, pdu.room_id().expect("has room ID"))
.ask_policy_server(&pdu, &pdu.room_id_or_hash())
.await
{
| Ok(true) => {},