Compare commits

..

138 Commits

Author SHA1 Message Date
Renovate Bot 91e8c0ea71 chore(deps): update https://github.com/softprops/action-gh-release action to v3 2026-05-01 17:50:20 +00:00
Renovate Bot d963b89a07 chore(deps): update rust-zerover-patch-updates 2026-05-01 17:40:13 +00:00
Ginger 680c972b44 chore: News fragment 2026-05-01 13:17:00 -04:00
Ginger 88b59eb053 fix: Include target user's membership when building stripped state 2026-05-01 13:15:55 -04:00
timedout 4a99de0d28 fix: Store incoming federated invite membership events correctly
Co-Authored-By: Ginger <ginger@gingershaped.computer>
Reviewed-By: Ginger <ginger@gingershaped.computer>
2026-05-01 14:49:27 +01:00
Renovate Bot 0e1f0683c6 chore(deps): update pre-commit hook crate-ci/typos to v1.46.0 2026-05-01 05:04:01 +00:00
Renovate Bot cec4abc7cd chore(deps): update ruma digest to 5742fec 2026-04-30 05:05:10 +00:00
Ginger e6cae5b8ed fix: Fix membership check in kick handler 2026-04-29 12:45:15 -04:00
Ginger 02ccf64d2e fix: Properly create room summary stripped state 2026-04-29 12:44:57 -04:00
Renovate Bot 4d4d875231 chore(deps): update node-patch-updates to v2.0.10 2026-04-29 14:14:58 +00:00
Renovate Bot cdf05b9a8b chore(deps): update rust crate serde-saphyr to 0.0.25 2026-04-29 14:14:17 +00:00
Ginger 9491be928d fix: Fix panic when creating rooms 2026-04-29 09:26:13 -04:00
Ginger 049babc7ca fix: Fix appservice authentication 2026-04-29 09:09:09 -04:00
Ginger 7b99757337 chore: Update news fragment 2026-04-28 09:16:57 -04:00
Ginger d09de005e3 refactor: Drop unused MSC3843 endpoint definition 2026-04-28 09:16:57 -04:00
Ginger e34fd76dc0 fix: Re-add support for MSC4293 2026-04-28 09:16:57 -04:00
Ginger 72dfe579ec docs: Remove ruwuma mention from development.md 2026-04-28 09:16:57 -04:00
Ginger cfae9a34f4 fix: Panic on PL content deserialization failures 2026-04-28 09:16:57 -04:00
Ginger 0a4808ea79 fix: Add stable routes for admin suspension endpoints 2026-04-28 09:16:57 -04:00
Ginger a9a18fc5f0 fix: Re-add support for custom room IDs 2026-04-28 09:16:57 -04:00
Ginger c1434c7935 refactor: Remove mystery initial state hack 2026-04-28 09:16:57 -04:00
Ginger 2e98ba3ed8 fix: Increase max length for report reasons 2026-04-28 09:16:57 -04:00
Ginger 551cf48642 fix: Add bounds checking for profile data 2026-04-28 09:16:57 -04:00
Ginger d256a1c1fa fix: Add bounds checking for profile data 2026-04-28 09:16:57 -04:00
Ginger 5578144da9 refactor: Clean up api/client/membership/kick.rs 2026-04-28 09:16:57 -04:00
Ginger 5309a064e8 refactor: Remove old project name in api/client/membership/ 2026-04-28 09:16:57 -04:00
Ginger 56d35b4e39 refactor: Clean up api/client/membership.ban.rs 2026-04-28 09:16:57 -04:00
Ginger 7375d1cad4 fix: Check existing key equality when uploading new E2EE keys 2026-04-28 09:16:57 -04:00
Ginger 80baf948ae refactor: Switch back to upstream Ruma 2026-04-28 09:16:57 -04:00
Ginger ed37696cef fix: Enable compatibility feature to fix federation with old conduit-family homeservers 2026-04-28 09:16:57 -04:00
Ginger 0a04c60f31 refactor: Improve summary service logging 2026-04-28 09:16:57 -04:00
Ginger e44ac230a6 fix: Fix failing test 2026-04-28 09:16:57 -04:00
Ginger 57c4567380 chore: Changelog 2026-04-28 09:16:57 -04:00
Ginger a8a8e1ea51 chore: Clippy fixes 2026-04-28 09:16:57 -04:00
Ginger 02f69a7160 fix: Fix code that was causing rustc to panic somehow 2026-04-28 09:16:56 -04:00
Ginger f68205a341 refactor: Remove pointless assert 2026-04-28 09:16:56 -04:00
Ginger 9899632b8b chore: Clippy fixes 2026-04-28 09:16:56 -04:00
Ginger a0524a9566 refactor: Fix errors in api/client/directory.rs, again 2026-04-28 09:16:56 -04:00
Ginger e70004c98f chore: Clippy fixes 2026-04-28 09:16:56 -04:00
Ginger e185f56f3a refactor: Fix errors in admin/processor.rs 2026-04-28 09:16:56 -04:00
Ginger 5058b7979a refactor: Fix errors in admin/user/ 2026-04-28 09:16:56 -04:00
Ginger 7f06a61242 refactor: Fix errors in admin/room/ 2026-04-28 09:16:56 -04:00
Ginger 54fefb421b refactor: Fix errors in admin/query/ 2026-04-28 09:16:56 -04:00
Ginger 9d39321deb refactor: Fix errors in admin/media/commands.rs 2026-04-28 09:16:56 -04:00
Ginger c64a4a71bc refactor: Resolve errors in admin/federation/commands.rs 2026-04-28 09:16:56 -04:00
Ginger 385b4b10d1 refactor: Fix errors in admin/debug/commands.rs 2026-04-28 09:16:56 -04:00
Ginger c12dd20431 refactor: Fix errors in admin/check/commands.rs 2026-04-28 09:16:56 -04:00
Ginger 3ad7c3b30d refactor: Fix remaining errors in api/ (and temporarily switch to a fork of ruma) 2026-04-28 09:16:56 -04:00
Ginger 7a58074a0d refactor: Fix errors in web/ 2026-04-28 09:16:56 -04:00
Ginger 0c7abd792d refactor: Fix errors in api/router/ 2026-04-28 09:16:56 -04:00
Ginger 0f64e6b49c refactor: Fix errors in api/server/well_known.rs 2026-04-28 09:16:56 -04:00
Ginger e7a1c71a25 refactor: Fix errors in api/server/version.rs 2026-04-28 09:16:56 -04:00
Ginger cd3b97ea26 refactor: Fix errors in api/server/user.rs 2026-04-28 09:16:56 -04:00
Ginger 845b731f8c refactor: Fix errors in api/server/state.rs 2026-04-28 09:16:56 -04:00
Ginger 97d2388717 refactor: Fix errors in api/server/state_ids.rs 2026-04-28 09:16:56 -04:00
Ginger 962a4aedc6 refactor: Fix errors in api/server/send.rs 2026-04-28 09:16:56 -04:00
Ginger 0eee63f7a1 refactor: Fix errors in api/server/send_leave.rs 2026-04-28 09:16:52 -04:00
Ginger eba38c2fa0 refactor: Fix errors in api/server/send_knock.rs 2026-04-28 09:16:52 -04:00
Ginger 338cdc2a75 refactor: Fix errors in api/server/send_join.rs 2026-04-28 09:16:52 -04:00
Ginger 2dacb8e071 refactor: Fix errors in api/server/query.rs 2026-04-28 09:16:52 -04:00
Ginger 398f73b690 refactor: Fix errors in api/server/publicrooms.rs 2026-04-28 09:16:52 -04:00
Ginger 78d9c29a05 refactor: Fix errors in api/server/media.rs 2026-04-28 09:16:52 -04:00
Ginger 0406f755c2 refactor: Fix errors in api/server/make_leave.rs 2026-04-28 09:16:52 -04:00
Ginger 1827888f09 refactor: Fix errors in api/server/make_knock.rs 2026-04-28 09:16:52 -04:00
Ginger 8871b1f74b refactor: Fix most errors in api/server/make_join.rs 2026-04-28 09:16:52 -04:00
Ginger c7489fd008 refactor: Fix errors in api/server/key.rs 2026-04-28 09:16:52 -04:00
Ginger 7f5f4df64e refactor: Fix errors in api/server/invite.rs 2026-04-28 09:16:52 -04:00
Ginger 15d87c00bf refactor: Fix errors in api/server/get_missing_events.rs 2026-04-28 09:16:52 -04:00
Ginger 7cae42634e refactor: Fix errors in api/server/event.rs 2026-04-28 09:16:52 -04:00
Ginger bd94ec4033 refactor: Fix errors in api/server/event_auth.rs 2026-04-28 09:16:52 -04:00
Ginger db7d378a2e refactor: Fix errors in api/server/backfill.rs 2026-04-28 09:16:52 -04:00
Ginger 39b2e461be refactor: Fix remaining errors in api/client/message.rs 2026-04-28 09:16:52 -04:00
Ginger ca358438ee refactor: Fix mystery weirdness in api/client/sync/v3/mod.rs 2026-04-28 09:16:52 -04:00
Ginger 4282d60181 refactor: Fix errors in api/client/well_known.rs 2026-04-28 09:16:52 -04:00
Ginger 10dbea72e8 refactor: Fix errors in api/client/voip.rs 2026-04-28 09:16:52 -04:00
Ginger aa7c2ea1ad refactor: Fix errors in api/client/user_directory.rs 2026-04-28 09:16:52 -04:00
Ginger 698d959407 refactor: Fix errors in api/client/unversioned.rs 2026-04-28 09:16:52 -04:00
Ginger 4c831c3531 refactor: Fix errors in api/client/typing.rs 2026-04-28 09:16:52 -04:00
Ginger 4dfdce303f refactor: Fix errors in api/client/to_device.rs 2026-04-28 09:16:52 -04:00
Ginger 8d8c310a64 refactor: Fix errors in api/client/threads.rs 2026-04-28 09:16:52 -04:00
Ginger e50e24e22d refactor: Fix errors in api/client/thirdparty.rs 2026-04-28 09:16:52 -04:00
Ginger a215b63077 refactor: Fix errors in api/client/tag.rs 2026-04-28 09:16:52 -04:00
Ginger 1d39210a0c refactor: Fix errors in api/client/state.rs 2026-04-28 09:16:52 -04:00
Ginger 360e0dada8 refactor: Fix errors in api/client/session.rs 2026-04-28 09:16:52 -04:00
Ginger cbf24a9483 refactor: Fix errors in api/client/send.rs 2026-04-28 09:16:52 -04:00
Ginger 6cb3f909c9 refactor: Fix errors in api/client/search.rs 2026-04-28 09:16:52 -04:00
Ginger b7c9ef89f0 refactor: Fix errors in api/client/report.rs 2026-04-28 09:16:52 -04:00
Ginger 64f7791ddb refactor: Fix errors in api/client/relations.rs 2026-04-28 09:16:52 -04:00
Ginger 836047b54e refactor: Fix errors in api/client/redact.rs 2026-04-28 09:16:52 -04:00
Ginger 256f8f679d refactor: Fix errors in api/client/read_marker.rs 2026-04-28 09:16:52 -04:00
Ginger 154cda35f3 refactor: Fix errors in api/client/push.rs 2026-04-28 09:16:52 -04:00
Ginger 1bf6d2a117 refactor: Fix errors in api/client/profile.rs and api/client/unstable.rs 2026-04-28 09:16:52 -04:00
Ginger 69d33931fa refactor: Fix errors in api/client/presence.rs 2026-04-28 09:16:52 -04:00
Ginger 83902a584b refactor: Fix errors in api/client/openid.rs 2026-04-28 09:16:52 -04:00
Ginger bcff259875 refactor: Fix most errors in api/client/messages.rs 2026-04-28 09:16:52 -04:00
Ginger 496ca80393 refactor: Fix errors in api/client/media.rs 2026-04-28 09:16:52 -04:00
Ginger 34b992fc40 refactor: Fix errors in api/client/media_legacy.rs
Sent from my Steam Deck
2026-04-28 09:16:52 -04:00
Ginger 1ea9330df8 refactor: Fix errors in api/client/keys.rs 2026-04-28 09:16:52 -04:00
Ginger 267e1c5d65 refactor: Fix errors in api/client/directory.rs 2026-04-28 09:16:52 -04:00
Ginger 36285e7784 refactor: Fix errors in api/client/device.rs 2026-04-28 09:16:52 -04:00
Ginger 53ab20d1cd refactor: Fix errors in api/client/dehydrated_device.rs 2026-04-28 09:16:52 -04:00
Ginger 96adf034e6 refactor: Fix errors in api/client/context.rs 2026-04-28 09:16:51 -04:00
Ginger a75bf32a34 refactor: Fix errors in api/client/capabilities.rs 2026-04-28 09:16:51 -04:00
Ginger c89ecd7b63 refactor: Fix errors in api/client/backup.rs 2026-04-28 09:16:51 -04:00
Ginger 7f30f8419b refactor: Fix errors in api/client/appservice.rs 2026-04-28 09:16:51 -04:00
Ginger 0a81f4d629 refactor: Fix errors in api/client/account_data.rs 2026-04-28 09:16:51 -04:00
Ginger 4e456249ac refactor: Fix errors in api/client/sync 2026-04-28 09:16:51 -04:00
Ginger 01e403f05f refactor: Resolve remaining errors in threepid.rs 2026-04-28 09:16:51 -04:00
Ginger a2f6141f4b refactor: Fix errors in api/client/room/ 2026-04-28 09:16:51 -04:00
Ginger 97a01a1500 refactor: Rename PduBuilder to PartialPdu 2026-04-28 09:16:51 -04:00
Ginger bf9c9716eb refactor: Add function to state_accessor to get create event 2026-04-28 09:16:51 -04:00
Ginger 471eb54c66 refactor: Consolidate hierarchy and summary logic in a new service 2026-04-28 09:16:51 -04:00
Ginger 755006c66d refactor: Fix errors in api/client/membership/ 2026-04-28 09:16:51 -04:00
Ginger ccd6072f2d refactor: Fix (most) errors in api/client/account/ 2026-04-28 09:16:51 -04:00
Ginger 24f7e1d658 chore: Clippy fixes 2026-04-28 09:16:51 -04:00
Ginger d62eeda130 refactor: Replace more uses of RoomVersionId with RoomVersionRules 2026-04-28 09:16:51 -04:00
Ginger 3e1f97487f fix: Resolve errors in recently added services 2026-04-28 09:16:51 -04:00
Jade Ellis a4e64383b7 refactor: Ruma upstreaming, bake a little more 2026-04-28 09:16:51 -04:00
Ginger 204bc1367e refactor: Ruma upstreaming, half-baked edition
Co-authored-by: Jade Ellis <jade@ellis.link>
2026-04-28 09:16:51 -04:00
Jade Ellis 1cc9dbf2a4 chore: Update lockfile 2026-04-28 09:27:00 +01:00
Renovate Bot 2cf28baf03 chore(deps): update pre-commit hook crate-ci/typos to v1.45.2 2026-04-28 05:03:35 +00:00
timedout f3fb218652 style: Clippy conflicts with cargo fmt, apparently 2026-04-27 22:15:52 +00:00
timedout 0924b7d27e style: Use debug assert instead of a normal assert 2026-04-27 22:15:52 +00:00
timedout 8575f191a0 style: Simplify build_local_dag return 2026-04-27 22:15:52 +00:00
timedout fe7cfd96e7 feat: Assert that no events were dropped during sorting 2026-04-27 22:15:52 +00:00
timedout 8b0e86a05d fix: Don't consider out-of-scope nodes as prev events before sorting incoming events 2026-04-27 22:15:52 +00:00
Jade Ellis 8b8fef998c fix(deps): Enable rustls roots on old rustls 2026-04-27 22:51:21 +01:00
Jade Ellis decd6083a0 fix(deps): Enable a TLS backend for outdated reqwest 2026-04-27 13:10:47 +01:00
Renovate Bot 06184d8c9f chore(deps): update https://github.com/taiki-e/install-action digest to 787505c 2026-04-25 12:21:19 +01:00
Renovate Bot 7c20e22b75 chore(deps): pin https://github.com/dorny/paths-filter action to fbd0ab8 2026-04-25 11:19:09 +00:00
Jade Ellis 3f862b58cb ci: Fix unstable builds for repo packages 2026-04-25 11:33:25 +01:00
Jade Ellis 046a6356f3 ci: Automatically upload release binaries 2026-04-25 11:17:43 +01:00
Jade Ellis 3af0240ff5 style: Fix clippy lint 2026-04-25 10:07:17 +01:00
ginger 5dcfff51cf chore: Admin announcement 2026-04-24 20:33:07 +00:00
Ginger b9989f1713 chore: Release 2026-04-24 15:21:47 -04:00
Ginger 1d3e3e7e62 chore: Update changelog 2026-04-24 15:21:40 -04:00
Jade Ellis 0adf3aa956 fix: Revert 7b1aabda9f
Yeah that didn't work sadly.
2026-04-24 16:22:46 +01:00
Jade Ellis 7b1aabda9f feat: Re-enable http3
This required the previous commit, and relies on
the included flag to make fat LTO builds
work correctly.
2026-04-24 14:51:11 +01:00
273 changed files with 6703 additions and 7513 deletions
+1 -1
View File
@@ -71,7 +71,7 @@ runs:
- name: Install timelord-cli and git-warp-time
if: steps.check-binaries.outputs.need-install == 'true'
uses: https://github.com/taiki-e/install-action@74e87cbfa15a59692b158178d8905a61bf6fca95 # v2
uses: https://github.com/taiki-e/install-action@787505cde8a44ea468a00478fe52baf23b15bccd # v2
with:
tool: git-warp-time,timelord-cli@3.0.1
+1 -1
View File
@@ -96,7 +96,7 @@ jobs:
if [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9]$ ]]; then
# Use the "stable" component for tagged semver releases
COMPONENT="stable"
elif [[ ${{ forge.ref }} =~ ^refs/tags/^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
elif [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
# Use the "unstable" component for tagged semver pre-releases
COMPONENT="unstable"
else
+6 -2
View File
@@ -105,7 +105,7 @@ jobs:
RELEASE_SUFFIX=""
TAG_NAME="${{ github.ref_name }}"
# Extract version from tag (remove v prefix if present)
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//' | tr '-' '~')
# Create spec file with tag version
sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
@@ -270,9 +270,13 @@ jobs:
# Determine the group based on ref type and branch
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
GROUP="stable"
# For tags, extract the tag name for version info
TAG_NAME="${{ github.ref_name }}"
if [[ "$TAG_NAME" == *"-"* ]]; then
GROUP="unstable"
else
GROUP="stable"
fi
elif [ "${{ github.ref_name }}" = "main" ]; then
GROUP="dev"
else
+1 -1
View File
@@ -53,7 +53,7 @@ jobs:
persist-credentials: false
- name: Check for file changes
uses: https://github.com/dorny/paths-filter@v4
uses: https://github.com/dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4
id: filter
with:
filters: |
+22
View File
@@ -199,6 +199,28 @@ jobs:
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
release-binaries:
name: "Release Binaries"
runs-on: ubuntu-latest
needs:
- build-release
- build-maxperf
permissions:
contents: write
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Download binary artifacts
uses: forgejo/download-artifact@v4
with:
pattern: conduwuit*
path: binaries
merge-multiple: true
- name: Create Release and Upload
uses: https://github.com/softprops/action-gh-release@b4309332981a82ec1c5618f44dd2e27cc8bfbfda # v3
with:
draft: true
files: binaries/*
mirror_images:
name: "Mirror Images"
runs-on: ubuntu-latest
+1 -1
View File
@@ -24,7 +24,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.45.1
rev: v1.46.0
hooks:
- id: typos
- id: typos
+17
View File
@@ -1,3 +1,20 @@
# Continuwuity 0.5.8 (2026-04-24)
## Features
- LDAP can now optionally be connected to using StartTLS, and you may unsafely skip verification. Contributed by @getz (#1389)
- Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
## Bugfixes
- Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
## Improved Documentation
- Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera. (#1622)
- Improve instructions for generic deployments, removing unnecessary parts and documenting the new initial registration token flow. Contributed by @stratself (#1677)
# Continuwuity v0.5.7 (2026-04-17)
## Features
Generated
+101 -149
View File
@@ -960,7 +960,7 @@ dependencies = [
[[package]]
name = "conduwuit"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"aws-lc-rs",
"clap",
@@ -981,7 +981,7 @@ dependencies = [
"opentelemetry-otlp",
"opentelemetry_sdk",
"parking_lot",
"reqwest 0.13.2",
"reqwest 0.13.3",
"rustls",
"sentry",
"sentry-tower",
@@ -997,8 +997,9 @@ dependencies = [
[[package]]
name = "conduwuit_admin"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"assign",
"clap",
"conduwuit_api",
"conduwuit_core",
@@ -1020,8 +1021,9 @@ dependencies = [
[[package]]
name = "conduwuit_api"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"assign",
"async-trait",
"axum",
"axum-client-ip",
@@ -1043,10 +1045,11 @@ dependencies = [
"lettre",
"log",
"rand 0.10.1",
"reqwest 0.13.2",
"reqwest 0.13.3",
"ruma",
"ruminuwuity",
"serde",
"serde_html_form 0.4.0",
"serde_html_form",
"serde_json",
"sha1 0.11.0",
"tokio",
@@ -1055,7 +1058,7 @@ dependencies = [
[[package]]
name = "conduwuit_build_metadata"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"built",
"cargo_metadata",
@@ -1063,10 +1066,11 @@ dependencies = [
[[package]]
name = "conduwuit_core"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"argon2",
"arrayvec",
"assign",
"axum",
"axum-extra",
"bytes",
@@ -1102,7 +1106,7 @@ dependencies = [
"rand 0.10.1",
"rand_core 0.6.4",
"regex",
"reqwest 0.13.2",
"reqwest 0.13.3",
"ruma",
"sanitize-filename",
"serde",
@@ -1127,7 +1131,7 @@ dependencies = [
[[package]]
name = "conduwuit_database"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"async-channel",
"conduwuit_core",
@@ -1147,7 +1151,7 @@ dependencies = [
[[package]]
name = "conduwuit_macros"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"cargo_toml",
"itertools 0.14.0",
@@ -1158,8 +1162,9 @@ dependencies = [
[[package]]
name = "conduwuit_router"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"assign",
"axum",
"axum-client-ip",
"axum-server",
@@ -1194,9 +1199,10 @@ dependencies = [
[[package]]
name = "conduwuit_service"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"askama",
"assign",
"async-trait",
"base64 0.22.1",
"blurhash",
@@ -1223,8 +1229,10 @@ dependencies = [
"rand 0.10.1",
"recaptcha-verify",
"regex",
"reqwest 0.13.2",
"reqwest 0.12.28",
"reqwest 0.13.3",
"ruma",
"ruminuwuity",
"rustyline-async",
"sd-notify",
"serde",
@@ -1241,9 +1249,10 @@ dependencies = [
[[package]]
name = "conduwuit_web"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"askama",
"assign",
"async-trait",
"axum",
"axum-extra",
@@ -1324,16 +1333,6 @@ dependencies = [
"typewit",
]
[[package]]
name = "continuwuity-admin-api"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
dependencies = [
"ruma-common",
"serde",
"serde_json",
]
[[package]]
name = "convert_case"
version = "0.10.0"
@@ -1780,16 +1779,6 @@ dependencies = [
"litrs",
]
[[package]]
name = "draupnir-antispam"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
dependencies = [
"ruma-common",
"serde",
"serde_json",
]
[[package]]
name = "dtor"
version = "0.8.1"
@@ -1829,7 +1818,6 @@ checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9"
dependencies = [
"curve25519-dalek",
"ed25519",
"rand_core 0.6.4",
"serde",
"sha2 0.10.9",
"subtle",
@@ -2617,6 +2605,7 @@ dependencies = [
"hyper",
"hyper-util",
"rustls",
"rustls-native-certs",
"tokio",
"tokio-rustls",
"tower-service",
@@ -2985,30 +2974,20 @@ dependencies = [
[[package]]
name = "js_option"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68421373957a1593a767013698dbf206e2b221eefe97a44d98d18672ff38423c"
checksum = "c7dd3e281add16813cf673bf74a32249b0aa0d1c8117519a17b3ada5e8552b3c"
dependencies = [
"serde",
"serde_core",
]
[[package]]
name = "konst"
version = "0.3.17"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97feab15b395d1860944abe6a8dd8ed9f8eadfae01750fada8427abda531d887"
checksum = "f660d5f887e3562f9ab6f4a14988795b694099d66b4f5dedc02d197ba9becb1d"
dependencies = [
"const_panic",
"konst_kernel",
"typewit",
]
[[package]]
name = "konst_kernel"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4b1eb7788f3824c629b1116a7a9060d6e898c358ebff59070093d51103dcc3c"
dependencies = [
"typewit",
]
@@ -3339,16 +3318,6 @@ dependencies = [
"walkdir",
]
[[package]]
name = "meowlnir-antispam"
version = "0.1.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
dependencies = [
"ruma-common",
"serde",
"serde_json",
]
[[package]]
name = "mime"
version = "0.3.17"
@@ -4572,16 +4541,21 @@ dependencies = [
"http-body",
"http-body-util",
"hyper",
"hyper-rustls",
"hyper-util",
"js-sys",
"log",
"percent-encoding",
"pin-project-lite",
"rustls",
"rustls-native-certs",
"rustls-pki-types",
"serde",
"serde_json",
"serde_urlencoded",
"sync_wrapper",
"tokio",
"tokio-rustls",
"tower",
"tower-http",
"tower-service",
@@ -4593,9 +4567,9 @@ dependencies = [
[[package]]
name = "reqwest"
version = "0.13.2"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801"
checksum = "62e0021ea2c22aed41653bc7e1419abb2c97e038ff2c33d0e1309e49a97deec0"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -4665,31 +4639,27 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.10.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.15.1"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"assign",
"continuwuity-admin-api",
"draupnir-antispam",
"js_int",
"js_option",
"meowlnir-antispam",
"ruma-appservice-api",
"ruma-client-api",
"ruma-common",
"ruma-events",
"ruma-federation-api",
"ruma-identifiers-validation",
"ruma-identity-service-api",
"ruma-push-gateway-api",
"ruma-signatures",
"ruma-state-res",
"web-time",
]
[[package]]
name = "ruma-appservice-api"
version = "0.10.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.15.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
@@ -4700,13 +4670,12 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.18.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.23.1"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"assign",
"bytes",
"date_header",
"http",
"js_int",
"js_option",
@@ -4714,7 +4683,7 @@ dependencies = [
"ruma-common",
"ruma-events",
"serde",
"serde_html_form 0.2.8",
"serde_html_form",
"serde_json",
"thiserror 2.0.18",
"url",
@@ -4723,14 +4692,15 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.18.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"base64 0.22.1",
"bytes",
"date_header",
"form_urlencoded",
"getrandom 0.2.17",
"getrandom 0.4.2",
"http",
"indexmap",
"js_int",
@@ -4741,9 +4711,8 @@ dependencies = [
"ruma-identifiers-validation",
"ruma-macros",
"serde",
"serde_html_form 0.2.8",
"serde_html_form",
"serde_json",
"smallvec",
"thiserror 2.0.18",
"time",
"tracing",
@@ -4751,37 +4720,34 @@ dependencies = [
"uuid",
"web-time",
"wildmatch",
"zeroize",
]
[[package]]
name = "ruma-events"
version = "0.28.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.33.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"indexmap",
"js_int",
"js_option",
"percent-encoding",
"pulldown-cmark",
"regex",
"ruma-common",
"ruma-identifiers-validation",
"ruma-macros",
"serde",
"serde_json",
"smallvec",
"thiserror 2.0.18",
"tracing",
"url",
"web-time",
"wildmatch",
"zeroize",
]
[[package]]
name = "ruma-federation-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.14.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"bytes",
"headers",
@@ -4794,6 +4760,7 @@ dependencies = [
"rand 0.10.1",
"ruma-common",
"ruma-events",
"ruma-signatures",
"serde",
"serde_json",
"thiserror 2.0.18",
@@ -4802,28 +4769,19 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.9.5"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.12.1"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"thiserror 2.0.18",
]
[[package]]
name = "ruma-identity-service-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
dependencies = [
"js_int",
"ruma-common",
"serde",
]
[[package]]
name = "ruma-macros"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.18.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"cfg-if",
"proc-macro-crate",
"proc-macro2",
@@ -4831,13 +4789,13 @@ dependencies = [
"ruma-identifiers-validation",
"serde",
"syn",
"toml 0.8.23",
"toml 1.1.2+spec-1.1.0",
]
[[package]]
name = "ruma-push-gateway-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.14.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
@@ -4848,21 +4806,46 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.15.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d00b51a8669b21689c4eb47fb81f3a8b27c3e371#d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
version = "0.20.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",
"memchr",
"pkcs8",
"rand 0.10.1",
"rand_core 0.6.4",
"ruma-common",
"serde_json",
"sha2 0.10.9",
"subslice",
"thiserror 2.0.18",
]
[[package]]
name = "ruma-state-res"
version = "0.16.0"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
"ruma-events",
"ruma-signatures",
"serde",
"serde_json",
"thiserror 2.0.18",
"tracing",
]
[[package]]
name = "ruminuwuity"
version = "0.5.8"
dependencies = [
"assign",
"ruma",
"serde",
"serde_json",
"wildmatch",
]
[[package]]
name = "rust-librocksdb-sys"
version = "0.42.0+10.10.1"
@@ -4935,9 +4918,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.23.39"
version = "0.23.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c2c118cb077cca2822033836dfb1b975355dfb784b5e8da48f7b6c5db74e60e"
checksum = "ef86cd5876211988985292b91c96a8f2d298df24e75989a43a3c73f2d4d8168b"
dependencies = [
"aws-lc-rs",
"log",
@@ -5130,7 +5113,7 @@ checksum = "eb25f439f97d26fea01d717fa626167ceffcd981addaa670001e70505b72acbb"
dependencies = [
"cfg_aliases",
"httpdate",
"reqwest 0.13.2",
"reqwest 0.13.3",
"sentry-backtrace",
"sentry-contexts",
"sentry-core",
@@ -5267,13 +5250,13 @@ dependencies = [
[[package]]
name = "serde-saphyr"
version = "0.0.24"
version = "0.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f83ad47c2f14654528a89495f8d0dbc64173176f8512c7c72386cbe81009f661"
checksum = "75e214449d107a81daf1453eb46c9314457660509534883e82db6faca2034a8a"
dependencies = [
"ahash",
"annotate-snippets",
"base64 0.22.1",
"base64 0.21.7",
"encoding_rs_io",
"getrandom 0.3.4",
"nohash-hasher",
@@ -5304,19 +5287,6 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_html_form"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f"
dependencies = [
"form_urlencoded",
"indexmap",
"itoa",
"ryu",
"serde_core",
]
[[package]]
name = "serde_html_form"
version = "0.4.0"
@@ -5624,15 +5594,6 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "subslice"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0a8e4809a3bb02de01f1f7faf1ba01a83af9e8eabcd4d31dd6e413d14d56aae"
dependencies = [
"memchr",
]
[[package]]
name = "subtle"
version = "2.6.1"
@@ -6272,15 +6233,6 @@ name = "typewit"
version = "1.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "214ca0b2191785cbc06209b9ca1861e048e39b5ba33574b3cedd58363d5bb5f6"
dependencies = [
"typewit_proc_macros",
]
[[package]]
name = "typewit_proc_macros"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6"
[[package]]
name = "uname"
@@ -7010,7 +6962,7 @@ dependencies = [
[[package]]
name = "xtask"
version = "0.5.7"
version = "0.5.8"
dependencies = [
"askama",
"cargo_metadata",
+29 -26
View File
@@ -12,7 +12,7 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
version = "0.5.7"
version = "0.5.8"
[workspace.metadata.crane]
name = "conduwuit"
@@ -68,7 +68,7 @@ default-features = false
version = "0.1.3"
[workspace.dependencies.rand]
version = "0.10.0"
version = "0.10.1"
# Used for the http request / response body type for Ruma endpoints used with reqwest
[workspace.dependencies.bytes]
@@ -161,7 +161,7 @@ features = ["raw_value"]
# Used for appservice registration files
[workspace.dependencies.serde-saphyr]
version = "0.0.24"
version = "0.0.25"
# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -342,51 +342,50 @@ version = "0.1.88"
[workspace.dependencies.lru-cache]
version = "0.1.2"
[workspace.dependencies.assign]
version = "1.1.1"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
# version = "0.14.1"
git = "https://github.com/ruma/ruma.git"
rev = "5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
features = [
"compat",
"rand",
"appservice-api-c",
"client-api",
"federation-api",
"markdown",
"push-gateway-api-c",
"unstable-exhaustive-types",
"state-res",
"rand",
"markdown",
"ring-compat",
"compat-upload-signatures",
"identifiers-validation",
"unstable-unspecified",
"compat-optional-txn-pdus",
"unstable-msc2448",
"unstable-msc2666",
"unstable-msc2867",
"unstable-msc2870",
"unstable-msc3026",
"unstable-msc3061",
"unstable-msc3814",
"unstable-msc3245",
"unstable-msc3266",
"unstable-msc3381", # polls
"unstable-msc3489", # beacon / live location
"unstable-msc3575",
"unstable-msc3930", # polls push rules
"unstable-msc3381",
"unstable-msc3489",
"unstable-msc3930",
"unstable-msc4075",
"unstable-msc4095",
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4155",
"unstable-msc4186",
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4210", # remove legacy mentions
"unstable-msc4195",
"unstable-msc4203",
"unstable-msc4310",
"unstable-msc4373",
"unstable-msc4380",
"unstable-msc4143",
"unstable-msc4293",
"unstable-msc4406",
"unstable-msc4439",
"unstable-extensible-events",
"unstable-pdu",
"unstable-msc4155",
"unstable-msc4143", # livekit well_known response
"unstable-msc4284",
"unstable-msc4439", # pgp_key in .well_known/matrix/support
]
[workspace.dependencies.rust-rocksdb]
@@ -658,6 +657,10 @@ default-features = false
package = "conduwuit"
path = "src/main"
[workspace.dependencies.ruminuwuity]
package = "ruminuwuity"
path = "src/ruminuwuity"
###############################################################################
#
# Release profiles
-1
View File
@@ -1 +0,0 @@
Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
+1
View File
@@ -0,0 +1 @@
The invite recipient's membership event is now included in invite stripped state, which should fix flaky invite display in some clients. Contributed by @ginger
+1
View File
@@ -0,0 +1 @@
Switched from Continuwuity's fork of Ruma back to upstream Ruma. Contributed by @ginger.
-1
View File
@@ -1 +0,0 @@
Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
-1
View File
@@ -1 +0,0 @@
LDAP can now optionally be connected to using StartTLS, and you may unsafely skip verification. Contributed by @getz
-1
View File
@@ -1 +0,0 @@
Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera.
-1
View File
@@ -1 +0,0 @@
Improve instructions for generic deployments, removing unnecessary parts and documenting the new initial registration token flow. Contributed by @stratself
+1
View File
@@ -0,0 +1 @@
Fixed a bug that caused the server to drop events during processing if several events for the same room were sent in a singular transaction. Contributed by @nex.
-12
View File
@@ -573,18 +573,6 @@
#
#allow_public_room_directory_over_federation = false
# Allow guests/unauthenticated users to access TURN credentials.
#
# This is the equivalent of Synapse's `turn_allow_guests` config option.
# This allows any unauthenticated user to call the endpoint
# `/_matrix/client/v3/voip/turnServer`.
#
# It is unlikely you need to enable this as all major clients support
# authentication for this endpoint and prevents misuse of your TURN server
# from potential bots.
#
#turn_allow_guests = false
# Set this to true to lock down your server's public room directory and
# only allow admins to publish rooms to the room directory. Unpublishing
# is still allowed by all users with this enabled.
+2
View File
@@ -17,12 +17,14 @@ ARG LLVM_VERSION=21
# Line one: compiler tools
# Line two: curl, for downloading binaries and wget because llvm.sh is broken with curl
# Line three: for xx-verify
# golang, cmake: For aws-lc-rs bindgen
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
pkg-config make jq \
wget curl git software-properties-common \
file
# golang cmake
# LLVM packages
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-2
View File
@@ -81,8 +81,6 @@ ## List of forked dependencies
All forked dependencies are maintained under the
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various
performance improvements, more features and better client/server interop
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
- [jemallocator][continuwuation-jemallocator] - Fork of
@@ -6,10 +6,10 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 11,
"id": 12,
"mention_room": false,
"date": "2026-04-17",
"message": "[v0.5.7](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.7) is out! Email verification! Terms and Conditions! Deleting notification pushers! So much good stuff. Go grab the release and read the changelog!"
"date": "2026-04-24",
"message": "[v0.5.8](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.8) is out! This is a patch release which fixes a bug in 0.5.7's email support -- upgrade soon if you use that feature."
}
]
}
+94 -115
View File
@@ -16,26 +16,24 @@
}
},
"node_modules/@emnapi/core": {
"version": "1.9.2",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz",
"integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==",
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz",
"integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"@emnapi/wasi-threads": "1.2.1",
"tslib": "^2.4.0"
}
},
"node_modules/@emnapi/runtime": {
"version": "1.9.2",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz",
"integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==",
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz",
"integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"tslib": "^2.4.0"
}
@@ -47,7 +45,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"tslib": "^2.4.0"
}
@@ -109,9 +106,9 @@
}
},
"node_modules/@napi-rs/wasm-runtime": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.2.tgz",
"integrity": "sha512-sNXv5oLJ7ob93xkZ1XnxisYhGYXfaG9f65/ZgYuAu3qt7b3NadcOEhLvx28hv31PgX8SZJRYrAIPQilQmFpLVw==",
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.4.tgz",
"integrity": "sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==",
"dev": true,
"license": "MIT",
"optional": true,
@@ -128,13 +125,13 @@
}
},
"node_modules/@rsbuild/core": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.0-rc.1.tgz",
"integrity": "sha512-eqxtRlQiFSm/ibCNGiPj8ozsGSNK91NY+GksmPuTCPmWQExGtPqM1V+s13UYeWZS6fYbMRs7NlQKD896e0QkKA==",
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.3.tgz",
"integrity": "sha512-2myp7jUgGen50saxW8OJD/eMVKp7HnuBN5MUzwRb6mDbRZZVpoorfI4LQqiGSBNjGLB6jltvx/R2yHmcmnchwg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/core": "2.0.0-rc.1",
"@rspack/core": "~2.0.1",
"@swc/helpers": "^0.5.21"
},
"bin": {
@@ -153,17 +150,17 @@
}
},
"node_modules/@rsbuild/plugin-react": {
"version": "1.4.6",
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-1.4.6.tgz",
"integrity": "sha512-LAT6xHlEyZKA0VjF/ph5d50iyG+WSmBx+7g98HNZUwb94VeeTMZFB8qVptTkbIRMss3BNKOXmHOu71Lhsh9oEw==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-2.0.0.tgz",
"integrity": "sha512-/1gzt39EGUSFEqB83g46QoOwsgv172HI18i6au1b6lgIaX4sv9stuX4ijdHbHCp8PqYEq+MyQ99jIQMO6I+etg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/plugin-react-refresh": "^1.6.1",
"@rspack/plugin-react-refresh": "2.0.0",
"react-refresh": "^0.18.0"
},
"peerDependencies": {
"@rsbuild/core": "^1.0.0 || ^2.0.0-0"
"@rsbuild/core": "^2.0.0-0"
},
"peerDependenciesMeta": {
"@rsbuild/core": {
@@ -172,28 +169,28 @@
}
},
"node_modules/@rspack/binding": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.0-rc.1.tgz",
"integrity": "sha512-rhJqtbyiRPOjTAZW0xTZFbOrS5yP5yL1SF0DPE9kvFfzePz30IqjMDMxL0KuhkDZd/M1eUINJyoqd8NTbR9wHw==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.1.tgz",
"integrity": "sha512-ynV1gw4KqFtQ0P+ZZh76SUj49wBb2FuHW3zSmHverHWuxBhzvrZS6/dZ+fCFQG8bTTPtrPz0RQUTN3uEDbPVBQ==",
"dev": true,
"license": "MIT",
"optionalDependencies": {
"@rspack/binding-darwin-arm64": "2.0.0-rc.1",
"@rspack/binding-darwin-x64": "2.0.0-rc.1",
"@rspack/binding-linux-arm64-gnu": "2.0.0-rc.1",
"@rspack/binding-linux-arm64-musl": "2.0.0-rc.1",
"@rspack/binding-linux-x64-gnu": "2.0.0-rc.1",
"@rspack/binding-linux-x64-musl": "2.0.0-rc.1",
"@rspack/binding-wasm32-wasi": "2.0.0-rc.1",
"@rspack/binding-win32-arm64-msvc": "2.0.0-rc.1",
"@rspack/binding-win32-ia32-msvc": "2.0.0-rc.1",
"@rspack/binding-win32-x64-msvc": "2.0.0-rc.1"
"@rspack/binding-darwin-arm64": "2.0.1",
"@rspack/binding-darwin-x64": "2.0.1",
"@rspack/binding-linux-arm64-gnu": "2.0.1",
"@rspack/binding-linux-arm64-musl": "2.0.1",
"@rspack/binding-linux-x64-gnu": "2.0.1",
"@rspack/binding-linux-x64-musl": "2.0.1",
"@rspack/binding-wasm32-wasi": "2.0.1",
"@rspack/binding-win32-arm64-msvc": "2.0.1",
"@rspack/binding-win32-ia32-msvc": "2.0.1",
"@rspack/binding-win32-x64-msvc": "2.0.1"
}
},
"node_modules/@rspack/binding-darwin-arm64": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.0-rc.1.tgz",
"integrity": "sha512-fYbeDDDg6QKZzXYt/J0/j0Qhr01wQLuISUsYnNhu5MLwdXVUSVcqz+CTqgF3d0EQVVn6FqLV63lbNRzUGfSq9g==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.1.tgz",
"integrity": "sha512-CGFO5zmajD1Itch1lxAI7+gvKiagzyqXopHv/jHG9Su2WWQ2/Nhn2/rkSpdp6ptE9ri6+6tCOOahf099/v/Xog==",
"cpu": [
"arm64"
],
@@ -205,9 +202,9 @@
]
},
"node_modules/@rspack/binding-darwin-x64": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.0-rc.1.tgz",
"integrity": "sha512-MvXi9kr8xXn1y0PD1WI/4YphRNOdbykJjKdEsAG4JxEVoERmhIHOTwKvUqlejajizAwlVZcxQl/FacoPLsKN5Q==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.1.tgz",
"integrity": "sha512-2vvBNBoS09/PurupBwSrlTZd8283o00B8v20ncsNUdEff41uCR/hzIrYoTIVWnVST+Gt5O1+cfcfORp397lajg==",
"cpu": [
"x64"
],
@@ -219,9 +216,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-gnu": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.0-rc.1.tgz",
"integrity": "sha512-j6WsHEwGSdUoiy4BsQBW0RjFl+MBzozdybSYhkiyVSoHlbm7CPt3XaaS3elH5YcwuLHORmVHPP91QhwWl9UFJg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.1.tgz",
"integrity": "sha512-uvNXk6ahE3AH3h2avnd1Mgno68YQpS4cfX1OkOGWIC/roL+NrOP2XVXV4yfVAoydPALDO7AfbIfN0QdmBK3rsA==",
"cpu": [
"arm64"
],
@@ -236,9 +233,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-musl": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.0-rc.1.tgz",
"integrity": "sha512-MPoZE0aS8oH+Wr0R5tIYch8gbUwYYf4LsiGdP6enMKMTrmpJyOVGlhPHVSwsrFgBg7fjTGOuxHuibtsvDUdLOQ==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.1.tgz",
"integrity": "sha512-S/a6uN9PiZ5O/PjSqyIXhuRC1lVzeJkJV69NeLk5sIEUiDQ/aQGZG97uN+tluwpbo1tPbLJkdHYETfjspOX4Pg==",
"cpu": [
"arm64"
],
@@ -253,9 +250,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-gnu": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.0-rc.1.tgz",
"integrity": "sha512-gOlPCwtIg9GsFG/8ZdUyV5SyXDaGq2kmtXmyyFU7RO33MaalltNEBMf2hevRPj9z39eSzxwgJDonMOdx5Fo0Og==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.1.tgz",
"integrity": "sha512-C13Kk0OkZiocZVj187Sf753UH6pDXnuEu6vzUvi3qv9ltibG1ki0H2Y8isXBYL2cHQOV+hk0g1S6/4z3TTB97A==",
"cpu": [
"x64"
],
@@ -270,9 +267,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-musl": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.0-rc.1.tgz",
"integrity": "sha512-K6Swk1rfP4z4b6bp84NlikGlUWMOPpIWCtlPr/W0TWgc2C/cd844oHdoIu7WtmOH7y9AwB5UG2bWpgFAVwykCw==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.1.tgz",
"integrity": "sha512-TQsiBFpEDGkuvK9tNdGj/Uc+AIytzqhxXH/1jKU6M24cWB1DTw/Cx7DdrkCBDyq3129K3POLdujvbWCGqBzQUw==",
"cpu": [
"x64"
],
@@ -287,9 +284,9 @@
]
},
"node_modules/@rspack/binding-wasm32-wasi": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.0-rc.1.tgz",
"integrity": "sha512-aa9oUTqOb1QjwsHVlMr5sV+7mcBI4MLQ/xhFO2CIEcfVnJIPl8XpKUbDEgqMwcFlzcgzKmHg5cVmIvd82BLgow==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.1.tgz",
"integrity": "sha512-wk3gyUgBW/ayP49bI54bkY8+EQnfBHxdoe9dz3oobSTZQc8AOWwmUUDEPltW8rUvPOM6dfHECTOUMnfaf2f5yA==",
"cpu": [
"wasm32"
],
@@ -297,13 +294,15 @@
"license": "MIT",
"optional": true,
"dependencies": {
"@napi-rs/wasm-runtime": "1.1.2"
"@emnapi/core": "1.10.0",
"@emnapi/runtime": "1.10.0",
"@napi-rs/wasm-runtime": "1.1.4"
}
},
"node_modules/@rspack/binding-win32-arm64-msvc": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.0-rc.1.tgz",
"integrity": "sha512-+UxF0c7E9bE3siFbMHi+mmoeQJzcTKl1j3x+Y6MY/PJ3V70cU23wOaxMvmSsCyq2JNJBT2RCNZ9HaL+o3kReug==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.1.tgz",
"integrity": "sha512-rHjLcy3VcAC3+x+PxH+gwhwv6tPe0JdXTNT5eAOs9wgZIM6T9p4wre49+K4Qy98+Fb7TTbLX0ObUitlOkGwTSA==",
"cpu": [
"arm64"
],
@@ -315,9 +314,9 @@
]
},
"node_modules/@rspack/binding-win32-ia32-msvc": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.0-rc.1.tgz",
"integrity": "sha512-gc0JdkdxSWo+o/b1qTCT6mZ3DrlGe32eW+Ps3xInxcG4UHjUG7hTDgFtOgVQ6VhQ8WMUXG+TQOz0CySVpYjsoQ==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.1.tgz",
"integrity": "sha512-Ad1vVqMBBnd4T8rsORngu9sl2kyRTlS4kMlvFudjzl1X2UFArEDBe0YVGNN7ZvahM12CErUx2WiN8Sd8pb+qXQ==",
"cpu": [
"ia32"
],
@@ -329,9 +328,9 @@
]
},
"node_modules/@rspack/binding-win32-x64-msvc": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.0-rc.1.tgz",
"integrity": "sha512-Dnj0jthyVUikf65MGEyZy3akshtSmR1xsp/Xr0h/NWTo5JFWHKAFNYFE+jFfY0uzC8e4IDcLQLYoFomqV1DsEg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.1.tgz",
"integrity": "sha512-oPM2Jtm7HOlmxl/aBfleAVlL6t9VeHx6WvEets7BBJMInemFXAQd4CErRqybf7rXutACzLeUWBOue4Jpd1/ykw==",
"cpu": [
"x64"
],
@@ -343,13 +342,13 @@
]
},
"node_modules/@rspack/core": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.0-rc.1.tgz",
"integrity": "sha512-OIfkYn05/IWtVIdZ8Y/a0y/k4ipzqfApxIZqnJM59G/bGwQKMBrLHpOMGgV2Wmq1j9UMXzF7ZtsFMUbYBhFb9A==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.1.tgz",
"integrity": "sha512-lgfZiExh8kDR/3obgi3RQKwKG5av1Xf5qDN1aVde777W9pbmx0Pqvrww1qtNvJ+gobEjbrrn5HEZWYGe0VLmcA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/binding": "2.0.0-rc.1"
"@rspack/binding": "2.0.1"
},
"engines": {
"node": "^20.19.0 || >=22.12.0"
@@ -368,36 +367,33 @@
}
},
"node_modules/@rspack/plugin-react-refresh": {
"version": "1.6.2",
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-1.6.2.tgz",
"integrity": "sha512-k+/VrfTNgo+KirjI6V+8CWRj6y+DH9jOUWv8JorYY4vKf/9xfnZ8xHzuB4iqCwTtoZl9YnxOaOuoyjJipc2tiQ==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-2.0.0.tgz",
"integrity": "sha512-Cf6CxBStNDJbiXMc/GmsvG1G8PRlUpa0MSfWsMTI+e8npzuTN/p8nwLs3shriBZOLciqgkSZpBtPTd10BLpj1g==",
"dev": true,
"license": "MIT",
"dependencies": {
"error-stack-parser": "^2.1.4"
},
"peerDependencies": {
"react-refresh": ">=0.10.0 <1.0.0",
"webpack-hot-middleware": "2.x"
"@rspack/core": "^2.0.0-0",
"react-refresh": ">=0.10.0 <1.0.0"
},
"peerDependenciesMeta": {
"webpack-hot-middleware": {
"@rspack/core": {
"optional": true
}
}
},
"node_modules/@rspress/core": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.9.tgz",
"integrity": "sha512-cfbqqbWtdimrWIsfeyPnQOTKwJpdNLr8VnwLIL4JYC2ZcRq+xcInpszLXVpV86nONL6qI19usr2Or7uzZJ+ynA==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.10.tgz",
"integrity": "sha512-DvoV7YUW538x0CVAGyYPKfjUHgEuq7Z8LZq1cpfUgBpA1DynFUK3Ls6spvdoAHAl3l0AN+xxOHpu/sRVhzqi/A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@mdx-js/mdx": "^3.1.1",
"@mdx-js/react": "^3.1.1",
"@rsbuild/core": "2.0.0-rc.1",
"@rsbuild/plugin-react": "~1.4.6",
"@rspress/shared": "2.0.9",
"@rsbuild/core": "^2.0.2",
"@rsbuild/plugin-react": "~2.0.0",
"@rspress/shared": "2.0.10",
"@shikijs/rehype": "^4.0.2",
"@types/unist": "^3.0.3",
"@unhead/react": "^2.1.13",
@@ -411,8 +407,8 @@
"mdast-util-mdxjs-esm": "^2.0.1",
"medium-zoom": "1.1.0",
"nprogress": "^0.2.0",
"react": "^19.2.4",
"react-dom": "^19.2.4",
"react": "^19.2.5",
"react-dom": "^19.2.5",
"react-lazy-with-preload": "^2.2.1",
"react-reconciler": "0.33.0",
"react-render-to-markdown": "19.0.1",
@@ -440,39 +436,39 @@
}
},
"node_modules/@rspress/plugin-client-redirects": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.9.tgz",
"integrity": "sha512-r2GyHzOSt8CeS4UIsy/cPM5Zotekt1JVQFmgOYGapvll5ktUlVcd77HLtXDbZjtpgtj0XlaMLrXueOpV2gsBoQ==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.10.tgz",
"integrity": "sha512-ImOm3h/cbXiJXIvpwv3Wn9rM91xgdhKbD2WX+WlMlWO4AtQfKR4XFrVhIZZAkrt09eeotRIklA7nu8Nuzzzbsw==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^20.19.0 || >=22.12.0"
},
"peerDependencies": {
"@rspress/core": "^2.0.9"
"@rspress/core": "^2.0.10"
}
},
"node_modules/@rspress/plugin-sitemap": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.9.tgz",
"integrity": "sha512-GTuXuySaeaazUZoUxdk2vZ8p0ehIgulPjCP9C7gDg6lIh5JGpUbcjG4def4tWHsxUoKp2rIwu/93bHwKb8T0Mw==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.10.tgz",
"integrity": "sha512-PZLig9+OlnyLcy6x9BlEqWSRef6TzDWB6Dlh2/hY41FtKlhyb7d7U56RGlLselWaQV54SHVa6H/y611A56ZI2g==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^20.19.0 || >=22.12.0"
},
"peerDependencies": {
"@rspress/core": "^2.0.9"
"@rspress/core": "^2.0.10"
}
},
"node_modules/@rspress/shared": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.9.tgz",
"integrity": "sha512-G48n3pC7AVAR58pLqwClUCYj5Nt7ZgYEStR8VTBGFuPgXtzb3+KPfo/gz0hb6wxdKJ1cL5ohPsZ6EXqllu6lew==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.10.tgz",
"integrity": "sha512-Kx10OAHWqi2jvW7ScmBUbkGjnwv4E6rEoelUchcL8It8nQ4nAVk0xvvES7m64knEon55zDbs8JQumCjbHu801Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rsbuild/core": "2.0.0-rc.1",
"@rsbuild/core": "^2.0.2",
"@shikijs/rehype": "^4.0.2",
"unified": "^11.0.5"
}
@@ -972,16 +968,6 @@
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/error-stack-parser": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz",
"integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"stackframe": "^1.3.4"
}
},
"node_modules/esast-util-from-estree": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz",
@@ -3218,13 +3204,6 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/stackframe": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/stackframe/-/stackframe-1.3.4.tgz",
"integrity": "sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==",
"dev": true,
"license": "MIT"
},
"node_modules/stringify-entities": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+1
View File
@@ -84,6 +84,7 @@ ctor.workspace = true
futures.workspace = true
lettre.workspace = true
log.workspace = true
assign.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde-saphyr.workspace = true
+1 -1
View File
@@ -7,7 +7,7 @@
#[implement(Context, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result {
let timer = tokio::time::Instant::now();
let users = self.services.users.iter().collect::<Vec<_>>().await;
let users = self.services.users.stream().collect::<Vec<_>>().await;
let query_time = timer.elapsed();
let total = users.len();
+37 -24
View File
@@ -79,12 +79,14 @@ pub(super) async fn parse_pdu(&self) -> Result {
}
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
let room_version_rules = RoomVersionId::V12.rules().unwrap();
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json in command body: {e}"),
| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
| Ok(value) => match ruma::signatures::reference_hash(&value, &room_version_rules) {
| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
| Ok(hash) => {
let event_id = OwnedEventId::parse(format!("${hash}"));
let event_id = EventId::parse(format!("${hash}"));
match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
@@ -119,7 +121,7 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
} else {
"PDU found in our database"
};
write!(self, "{msg}\n```json\n{text}\n```",)
write!(self, "{msg}\n```json\n{text}\n```")
},
}
.await
@@ -187,10 +189,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
for event_id in list {
if force {
match self
.get_remote_pdu(event_id.to_owned(), server.clone())
.await
{
match self.get_remote_pdu(event_id.clone(), server.clone()).await {
| Err(e) => {
failed_count = failed_count.saturating_add(1);
self.services
@@ -205,7 +204,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
},
}
} else {
self.get_remote_pdu(event_id.to_owned(), server.clone())
self.get_remote_pdu(event_id.clone(), server.clone())
.await?;
success_count = success_count.saturating_add(1);
}
@@ -237,10 +236,10 @@ pub(super) async fn get_remote_pdu(
match self
.services
.sending
.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
event_id: event_id.clone(),
include_unredacted_content: None,
})
.send_federation_request(
&server,
ruma::api::federation::event::get_event::v1::Request::new(event_id.clone()),
)
.await
{
| Err(e) => {
@@ -330,9 +329,9 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
match self
.services
.sending
.send_federation_request(
.send_unauthenticated_request(
&server,
ruma::api::federation::discovery::get_server_version::v1::Request {},
ruma::api::federation::discovery::get_server_version::v1::Request::new(),
)
.await
{
@@ -361,7 +360,7 @@ pub(super) async fn force_device_list_updates(&self) -> Result {
self.services
.users
.stream()
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
.for_each(async |user_id| self.services.users.mark_device_key_update(&user_id).await)
.await;
write!(self, "Marked all devices for all users as having new keys to update").await
@@ -430,9 +429,16 @@ pub(super) async fn verify_json(&self) -> Result {
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
let room_version_rules = RoomVersionId::V12.rules().unwrap();
match serde_json::from_str::<CanonicalJsonObject>(&string) {
| Err(e) => return Err!("Invalid json: {e}"),
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
| Ok(value) => match self
.services
.server_keys
.verify_json(&value, &room_version_rules)
.await
{
| Err(e) => return Err!("Signature verification failed: {e}"),
| Ok(()) => write!(self, "Signature correct"),
},
@@ -445,9 +451,15 @@ pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
use ruma::signatures::Verified;
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
let room_version_rules = RoomVersionId::V12.rules().unwrap();
event.remove("event_id");
let msg = match self.services.server_keys.verify_event(&event, None).await {
let msg = match self
.services
.server_keys
.verify_event(&event, &room_version_rules)
.await
{
| Err(e) => return Err(e),
| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
| Ok(Verified::All) => "signatures and hashes OK.",
@@ -544,16 +556,17 @@ pub(super) async fn force_set_room_state_from_server(
};
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
let room_version_rules = room_version.rules().unwrap();
let mut state: HashMap<u64, OwnedEventId> = HashMap::new();
let remote_state_response = self
.services
.sending
.send_federation_request(&server_name, get_room_state::v1::Request {
room_id: room_id.clone(),
event_id: at_event_id,
})
.send_federation_request(
&server_name,
get_room_state::v1::Request::new(at_event_id, room_id.clone()),
)
.await?;
for pdu in remote_state_response.pdus.clone() {
@@ -576,7 +589,7 @@ pub(super) async fn force_set_room_state_from_server(
for result in remote_state_response.pdus.iter().map(|pdu| {
self.services
.server_keys
.validate_and_add_event_id(pdu, &room_version)
.validate_and_add_event_id(pdu, &room_version_rules)
}) {
let Ok((event_id, value)) = result.await else {
continue;
@@ -608,7 +621,7 @@ pub(super) async fn force_set_room_state_from_server(
for result in remote_state_response.auth_chain.iter().map(|pdu| {
self.services
.server_keys
.validate_and_add_event_id(pdu, &room_version)
.validate_and_add_event_id(pdu, &room_version_rules)
}) {
let Ok((event_id, value)) = result.await else {
continue;
@@ -625,7 +638,7 @@ pub(super) async fn force_set_room_state_from_server(
.services
.rooms
.event_handler
.resolve_state(&room_id, &room_version, state)
.resolve_state(&room_id, &room_version_rules, state)
.await?;
info!("Compressing new room state");
+2 -2
View File
@@ -111,7 +111,7 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
.rooms
.state_cache
.rooms_joined(&user_id)
.then(|room_id| get_room_info(self.services, room_id))
.then(async |room_id| get_room_info(self.services, &room_id).await)
.collect()
.await;
@@ -129,6 +129,6 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```"))
.await
}
+5 -4
View File
@@ -6,7 +6,8 @@
warn,
};
use conduwuit_service::media::Dim;
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};
use service::media::mxc::Mxc;
use crate::{admin_command, utils::parse_local_user_id};
@@ -261,7 +262,7 @@ pub(super) async fn delete_past_remote_media(
)
.await?;
self.write_str(&format!("Deleted {deleted_count} total files.",))
self.write_str(&format!("Deleted {deleted_count} total files."))
.await
}
@@ -271,7 +272,7 @@ pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
self.write_str(&format!("Deleted {deleted_count} total files.",))
self.write_str(&format!("Deleted {deleted_count} total files."))
.await
}
@@ -330,7 +331,7 @@ pub(super) async fn delete_all_from_server(
}
}
self.write_str(&format!("Deleted {deleted_count} total files.",))
self.write_str(&format!("Deleted {deleted_count} total files."))
.await
}
+5 -5
View File
@@ -16,8 +16,8 @@
use ruma::{
EventId,
events::{
relation::InReplyTo,
room::message::{Relation::Reply, RoomMessageEventContent},
relation::{InReplyTo, Reply},
room::message::{Relation, RoomMessageEventContent},
},
};
use service::{
@@ -38,6 +38,7 @@ pub(super) fn dispatch(services: Arc<Services>, command: CommandInput) -> Proces
}
#[tracing::instrument(skip_all, name = "admin", level = "info")]
#[allow(clippy::result_large_err)]
async fn handle_command(services: Arc<Services>, command: CommandInput) -> ProcessorResult {
AssertUnwindSafe(Box::pin(process_command(services, &command)))
.catch_unwind()
@@ -277,9 +278,8 @@ fn reply(
mut content: RoomMessageEventContent,
reply_id: Option<&EventId>,
) -> RoomMessageEventContent {
content.relates_to = reply_id.map(|event_id| Reply {
in_reply_to: InReplyTo { event_id: event_id.to_owned() },
});
content.relates_to =
reply_id.map(|event_id| Relation::Reply(Reply::new(InReplyTo::new(event_id.to_owned()))));
content
}
+2 -2
View File
@@ -50,7 +50,7 @@ async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Resu
while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await {
if let Some(server_name) = server_name.as_ref() {
if name != server_name {
if name != *server_name {
continue;
}
}
@@ -76,7 +76,7 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result {
overrides.next().await
{
if let Some(server_name) = server_name.as_ref() {
if name != server_name {
if name != *server_name {
continue;
}
}
+1 -2
View File
@@ -41,7 +41,6 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
.rooms
.alias
.local_aliases_for_room(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -54,7 +53,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
.rooms
.alias
.all_local_aliases()
.map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned()))
.map(|(room_id, alias)| (room_id, alias.to_owned()))
.collect::<Vec<_>>()
.await;
let query_time = timer.elapsed();
-8
View File
@@ -101,7 +101,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.room_servers(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -118,7 +117,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.server_rooms(&server)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -135,7 +133,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.room_members(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -152,7 +149,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.local_users_in_room(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -169,7 +165,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.active_local_users_in_room(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -212,7 +207,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.room_useroncejoined(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -229,7 +223,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.room_members_invited(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -276,7 +269,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
.rooms
.state_cache
.rooms_joined(&user_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
+1 -4
View File
@@ -104,7 +104,6 @@ async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Re
.rooms
.state_cache
.get_shared_rooms(&user_a, &user_b)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
@@ -217,8 +216,7 @@ async fn iter_users2(&self) -> Result {
let result: Vec<_> = self.services.users.stream().collect().await;
let result: Vec<_> = result
.into_iter()
.map(ruma::UserId::as_bytes)
.map(String::from_utf8_lossy)
.map(|user_id| String::from_utf8_lossy(user_id.as_bytes()).into_owned())
.collect();
let query_time = timer.elapsed();
@@ -254,7 +252,6 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result {
.services
.users
.all_device_ids(&user_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await;
+3 -3
View File
@@ -3,7 +3,7 @@
use clap::Subcommand;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{OwnedRoomAliasId, OwnedRoomId};
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId};
use crate::Context;
@@ -52,7 +52,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
| RoomAliasCommand::Which { ref room_alias_localpart } => {
let room_alias_str =
format!("#{}:{}", room_alias_localpart, services.globals.server_name());
let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
let room_alias = match RoomAliasId::parse(room_alias_str) {
| Ok(alias) => alias,
| Err(err) => {
return Err!("Failed to parse alias: {err}");
@@ -139,7 +139,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
.rooms
.alias
.all_local_aliases()
.map(|(room_id, localpart)| (room_id.into(), localpart.into()))
.map(|(room_id, localpart)| (room_id, localpart.into()))
.collect::<Vec<(OwnedRoomId, String)>>()
.await;
+4 -4
View File
@@ -22,14 +22,14 @@ pub(super) async fn list_rooms(
.metadata
.iter_ids()
.filter_map(|room_id| async move {
(!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await)
(!exclude_disabled || !self.services.rooms.metadata.is_disabled(&room_id).await)
.then_some(room_id)
})
.filter_map(|room_id| async move {
(!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await)
(!exclude_banned || !self.services.rooms.metadata.is_banned(&room_id).await)
.then_some(room_id)
})
.then(|room_id| get_room_info(self.services, room_id))
.then(async |room_id| get_room_info(self.services, &room_id).await)
.then(|(room_id, total_members, name)| async move {
let local_members: Vec<_> = self
.services
@@ -72,7 +72,7 @@ pub(super) async fn list_rooms(
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len()))
.await
}
+2 -2
View File
@@ -43,7 +43,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
.rooms
.directory
.public_rooms()
.then(|room_id| get_room_info(services, room_id))
.then(async |room_id| get_room_info(services, &room_id).await)
.collect()
.await;
@@ -67,7 +67,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
.join("\n");
context
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```"))
.await
},
}
+1 -2
View File
@@ -46,7 +46,6 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
.then(|| self.services.globals.user_is_local(user_id))
.unwrap_or(true)
})
.map(ToOwned::to_owned)
.filter_map(|user_id| async move {
Some((
self.services
@@ -67,7 +66,7 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```"))
.await
}
+10 -14
View File
@@ -71,7 +71,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
debug!("Room specified is a room ID, banning room ID");
room_id.to_owned()
room_id.clone()
} else if room.is_room_alias_id() {
let room_alias = match RoomAliasId::parse(&room) {
| Ok(room_alias) => room_alias,
@@ -89,7 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
locally, if not using get_alias_helper to fetch room ID remotely"
);
match self.services.rooms.alias.resolve_alias(room_alias).await {
match self.services.rooms.alias.resolve_alias(&room_alias).await {
| Ok((room_id, servers)) => {
debug!(
%room_id,
@@ -116,7 +116,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
.rooms
.state_cache
.room_members(&room_id)
.map(ToOwned::to_owned)
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
@@ -140,7 +139,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
.rooms
.alias
.local_aliases_for_room(&room_id)
.map(ToOwned::to_owned)
.for_each(|local_alias| async move {
self.services
.rooms
@@ -205,7 +203,7 @@ async fn ban_list_of_rooms(&self) -> Result {
},
};
room_ids.push(room_id.to_owned());
room_ids.push(room_id.clone());
}
if room_alias_or_id.is_room_alias_id() {
@@ -215,7 +213,7 @@ async fn ban_list_of_rooms(&self) -> Result {
.services
.rooms
.alias
.resolve_local_alias(room_alias)
.resolve_local_alias(&room_alias)
.await
{
| Ok(room_id) => room_id,
@@ -229,7 +227,7 @@ async fn ban_list_of_rooms(&self) -> Result {
.services
.rooms
.alias
.resolve_alias(room_alias)
.resolve_alias(&room_alias)
.await
{
| Ok((room_id, servers)) => {
@@ -284,7 +282,6 @@ async fn ban_list_of_rooms(&self) -> Result {
.rooms
.state_cache
.room_members(&room_id)
.map(ToOwned::to_owned)
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
@@ -309,7 +306,6 @@ async fn ban_list_of_rooms(&self) -> Result {
.rooms
.alias
.local_aliases_for_room(&room_id)
.map(ToOwned::to_owned)
.for_each(|local_alias| async move {
self.services
.rooms
@@ -348,9 +344,9 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
};
debug!("Room specified is a room ID, unbanning room ID");
self.services.rooms.metadata.ban_room(room_id, false);
self.services.rooms.metadata.ban_room(&room_id, false);
room_id.to_owned()
room_id.clone()
} else if room.is_room_alias_id() {
let room_alias = match RoomAliasId::parse(&room) {
| Ok(room_alias) => room_alias,
@@ -372,7 +368,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
.services
.rooms
.alias
.resolve_local_alias(room_alias)
.resolve_local_alias(&room_alias)
.await
{
| Ok(room_id) => room_id,
@@ -382,7 +378,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
room ID over federation"
);
match self.services.rooms.alias.resolve_alias(room_alias).await {
match self.services.rooms.alias.resolve_alias(&room_alias).await {
| Ok((room_id, servers)) => {
debug!(
%room_id,
@@ -453,6 +449,6 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result {
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```"))
.await
}
+2 -2
View File
@@ -159,8 +159,8 @@ pub(super) async fn list_features(&self) -> Result {
let mut enabled_features = conduwuit::info::introspection::ENABLED_FEATURES
.lock()
.expect("locked")
.iter()
.flat_map(|(_, f)| f.iter())
.values()
.flat_map(|f| f.iter())
.collect::<Vec<_>>();
enabled_features.sort_unstable();
+55 -63
View File
@@ -9,20 +9,18 @@
};
use conduwuit::{
Err, Result, debug_warn, error, info,
matrix::{Event, pdu::PduBuilder},
matrix::{Event, pdu::PartialPdu},
utils::{self, ReadyExt},
warn,
};
use futures::{FutureExt, StreamExt};
use lettre::Address;
use ruma::{
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, UserId,
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, ServerName,
UserId, assign,
events::{
RoomAccountDataEventType, StateEventType,
room::{
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
redaction::RoomRedactionEventContent,
},
RoomAccountDataEventType,
room::{power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent},
tag::{TagEvent, TagEventContent, TagInfo},
},
};
@@ -41,7 +39,7 @@ pub(super) async fn list_users(&self) -> Result {
.services
.users
.list_local_users()
.map(ToString::to_string)
.map(|id| id.as_str().to_owned())
.collect()
.await;
@@ -103,11 +101,12 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})?,
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent::new(
ruma::events::push_rules::PushRulesEventContent::new(
ruma::push::Ruleset::server_default(&user_id),
),
))
.unwrap(),
)
.await?;
@@ -292,7 +291,12 @@ pub(super) async fn reset_password(
self.services
.users
.all_device_ids(&user_id)
.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
.for_each(async |device_id| {
self.services
.users
.remove_device(&user_id, &device_id)
.await;
})
.await;
write!(self, "\nAll existing sessions have been logged out.").await?;
}
@@ -437,7 +441,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
.rooms
.state_cache
.rooms_joined(&user_id)
.then(|room_id| get_room_info(self.services, room_id))
.then(async |room_id| get_room_info(self.services, &room_id).await)
.collect()
.await;
@@ -454,7 +458,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len()))
.await
}
@@ -506,7 +510,7 @@ pub(super) async fn force_join_list_of_local_users(
.rooms
.state_cache
.room_members(&room_id)
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
.ready_any(|user_id| server_admins.contains(&user_id))
.await
{
return Err!("There is not a single server admin in the room.",);
@@ -620,7 +624,7 @@ pub(super) async fn force_join_all_local_users(
.rooms
.state_cache
.room_members(&room_id)
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
.ready_any(|user_id| server_admins.contains(&user_id))
.await
{
return Err!("There is not a single server admin in the room.",);
@@ -633,7 +637,6 @@ pub(super) async fn force_join_all_local_users(
.services
.users
.list_local_users()
.map(UserId::to_owned)
.collect::<Vec<_>>()
.await
{
@@ -684,7 +687,7 @@ pub(super) async fn force_join_room(
);
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, &None).await?;
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
self.write_str(&format!("{user_id} has been joined to {room_id}."))
.await
}
@@ -716,7 +719,7 @@ pub(super) async fn force_leave_room(
.boxed()
.await?;
self.write_str(&format!("{user_id} has left {room_id}.",))
self.write_str(&format!("{user_id} has left {room_id}."))
.await
}
@@ -730,42 +733,34 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
"Parsed user_id must be a local user"
);
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
let state_lock = self.services.rooms.state.mutex.lock(room_id.as_str()).await;
let room_power_levels: Option<RoomPowerLevelsEventContent> = self
let mut room_power_levels = self
.services
.rooms
.state_accessor
.room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
.await
.ok();
.get_room_power_levels(&room_id)
.await;
let user_can_demote_self = room_power_levels
.as_ref()
.is_some_and(|power_levels_content| {
RoomPowerLevels::from(power_levels_content.clone())
.user_can_change_user_power_level(&user_id, &user_id)
}) || self
.services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
.await
.is_ok_and(|event| event.sender() == user_id);
let user_can_demote_self =
room_power_levels.user_can_change_user_power_level(&user_id, &user_id);
if !user_can_demote_self {
return Err!("User is not allowed to modify their own power levels in the room.",);
}
let mut power_levels_content = room_power_levels.unwrap_or_default();
power_levels_content.users.remove(&user_id);
room_power_levels.users.remove(&user_id);
let event_id = self
.services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
PartialPdu::state(
String::new(),
&RoomPowerLevelsEventContent::try_from(room_power_levels)
.expect("PLs should be valid for room version"),
),
&user_id,
Some(&room_id),
&state_lock,
@@ -793,7 +788,7 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
.boxed()
.await?;
self.write_str(&format!("{user_id} has been granted admin privileges.",))
self.write_str(&format!("{user_id} has been granted admin privileges."))
.await
}
@@ -811,9 +806,7 @@ pub(super) async fn put_room_tag(
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
tags_event
.content
@@ -850,9 +843,7 @@ pub(super) async fn delete_room_tag(
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
tags_event.content.tags.remove(&tag.clone().into());
@@ -882,9 +873,7 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId)
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
.await
@@ -921,19 +910,19 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
.rooms
.state
.mutex
.lock(&event.room_id_or_hash())
.lock(event.room_id_or_hash().as_str())
.await;
self.services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
redacts: Some(event.event_id().to_owned()),
..PduBuilder::timeline(&RoomRedactionEventContent {
..PartialPdu::timeline(&assign!(RoomRedactionEventContent::new_v1(), {
redacts: Some(event.event_id().to_owned()),
reason: Some(reason),
})
}))
},
event.sender(),
Some(&event.room_id_or_hash()),
@@ -963,7 +952,7 @@ pub(super) async fn force_leave_remote_room(
.resolve_with_servers(
&room_id,
if let Some(v) = via.clone() {
Some(vec![OwnedServerName::parse(v)?])
Some(vec![ServerName::parse(v)?])
} else {
None
},
@@ -976,7 +965,7 @@ pub(super) async fn force_leave_remote_room(
);
let mut vias: HashSet<OwnedServerName> = HashSet::new();
if let Some(via) = via {
vias.insert(OwnedServerName::parse(via)?);
vias.insert(ServerName::parse(via)?);
}
for server in vias_raw {
vias.insert(server);
@@ -1051,7 +1040,12 @@ pub(super) async fn logout(&self, user_id: String) -> Result {
self.services
.users
.all_device_ids(&user_id)
.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
.for_each(async |device_id| {
self.services
.users
.remove_device(&user_id, &device_id)
.await;
})
.await;
self.write_str(&format!("User {user_id} has been logged out from all devices."))
.await
@@ -1129,11 +1123,9 @@ pub(super) async fn get_user_by_email(&self, email: String) -> Result {
match self.services.threepid.get_localpart_for_email(&email).await {
| Some(localpart) => {
let user_id = OwnedUserId::parse(format!(
"@{localpart}:{}",
self.services.globals.server_name()
))
.unwrap();
let user_id =
UserId::parse(format!("@{localpart}:{}", self.services.globals.server_name()))
.unwrap();
self.write_str(&format!("{email} belongs to {user_id}."))
.await
+2
View File
@@ -88,7 +88,9 @@ lettre.workspace = true
log.workspace = true
rand.workspace = true
reqwest.workspace = true
assign.workspace = true
ruma.workspace = true
ruminuwuity.workspace = true
serde_html_form.workspace = true
serde_json.workspace = true
serde.workspace = true
+4 -7
View File
@@ -1,10 +1,8 @@
use axum::extract::State;
use conduwuit::{Err, Result, info, utils::ReadyExt, warn};
use futures::{FutureExt, StreamExt};
use ruma::{
OwnedRoomAliasId, continuwuity_admin_api::rooms,
events::room::message::RoomMessageEventContent,
};
use ruma::{OwnedRoomAliasId, events::room::message::RoomMessageEventContent};
use ruminuwuity::admin::continuwuity::rooms;
use crate::{Ruma, client::leave_room};
@@ -36,7 +34,6 @@ pub(crate) async fn ban_room(
.rooms
.state_cache
.room_members(&body.room_id)
.map(ToOwned::to_owned)
.ready_filter(|user| services.globals.user_is_local(user))
.boxed();
let mut evicted = Vec::new();
@@ -63,9 +60,9 @@ pub(crate) async fn ban_room(
.rooms
.alias
.local_aliases_for_room(&body.room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.collect()
.await;
for alias in &aliases {
info!("Removing alias {} for banned room {}", alias, body.room_id);
services
+4 -3
View File
@@ -1,7 +1,8 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{OwnedRoomId, continuwuity_admin_api::rooms};
use ruma::OwnedRoomId;
use ruminuwuity::admin::continuwuity::rooms;
use crate::Ruma;
@@ -22,8 +23,8 @@ pub(crate) async fn list_rooms(
.metadata
.iter_ids()
.filter_map(|room_id| async move {
if !services.rooms.metadata.is_banned(room_id).await {
Some(room_id.to_owned())
if !services.rooms.metadata.is_banned(&room_id).await {
Some(room_id.clone())
} else {
None
}
+43 -67
View File
@@ -1,15 +1,15 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Event, Result, err, info,
pdu::PduBuilder,
Err, Result, err, info,
pdu::PartialPdu,
utils::{ReadyExt, stream::BroadbandExt},
};
use conduwuit_service::Services;
use futures::{FutureExt, StreamExt};
use lettre::{Address, message::Mailbox};
use ruma::{
OwnedRoomId, OwnedUserId, UserId,
OwnedRoomId, UserId,
api::client::{
account::{
ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity,
@@ -18,12 +18,10 @@
},
uiaa::{AuthFlow, AuthType},
},
events::{
StateEventType,
room::{
member::{MembershipState, RoomMemberEventContent},
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
assign,
events::room::{
member::{MembershipState, RoomMemberEventContent},
power_levels::RoomPowerLevelsEventContent,
},
};
use service::{mailer::messages, uiaa::Identity};
@@ -87,7 +85,7 @@ pub(crate) async fn get_register_available_route(
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
}
Ok(get_username_availability::v3::Response { available: true })
Ok(get_username_availability::v3::Response::new(true))
}
/// # `POST /_matrix/client/r0/account/password`
@@ -143,7 +141,7 @@ pub(crate) async fn change_password_route(
.await?
};
let sender_user = OwnedUserId::parse(format!(
let sender_user = UserId::parse(format!(
"@{}:{}",
identity.localpart.expect("localpart should be known"),
services.globals.server_name()
@@ -161,7 +159,7 @@ pub(crate) async fn change_password_route(
.users
.all_device_ids(&sender_user)
.ready_filter(|id| *id != body.sender_device())
.for_each(|id| services.users.remove_device(&sender_user, id))
.for_each(async |id| services.users.remove_device(&sender_user, &id).await)
.await;
// Remove all pushers except the ones associated with this session
@@ -175,8 +173,8 @@ pub(crate) async fn change_password_route(
.get_pusher_device(&pushkey)
.await
.ok()
.filter(|pusher_device| pusher_device != body.sender_device())
.is_some()
.as_ref()
.is_some_and(|pusher_device| pusher_device != body.sender_device())
.then_some(pushkey)
})
.for_each(async |pushkey| {
@@ -194,7 +192,7 @@ pub(crate) async fn change_password_route(
.await;
}
Ok(change_password::v3::Response {})
Ok(change_password::v3::Response::new())
}
/// # `POST /_matrix/client/v3/account/password/email/requestToken`
@@ -215,7 +213,7 @@ pub(crate) async fn request_password_change_token_via_email_route(
};
let user_id =
OwnedUserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
UserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
let display_name = services.users.displayname(&user_id).await.ok();
let session = services
@@ -251,11 +249,10 @@ pub(crate) async fn whoami_route(
.map_err(|_| {
err!(Request(Forbidden("Application service has not registered this user.")))
})? && body.appservice_info.is_none();
Ok(whoami::v3::Response {
user_id: body.sender_user().to_owned(),
Ok(assign!(whoami::v3::Response::new(body.sender_user().to_owned(), is_guest), {
device_id: body.sender_device.clone(),
is_guest,
})
}))
}
/// # `POST /_matrix/client/r0/account/deactivate`
@@ -310,9 +307,7 @@ pub(crate) async fn deactivate_route(
.await;
}
Ok(deactivate::v3::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
})
Ok(deactivate::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
}
/// # `GET /_matrix/client/v1/register/m.login.registration_token/validity`
@@ -330,7 +325,7 @@ pub(crate) async fn check_registration_token_validity(
.await
.is_some();
Ok(check_registration_token_validity::v1::Response { valid })
Ok(check_registration_token_validity::v1::Response::new(valid))
}
/// Runs through all the deactivation steps:
@@ -354,13 +349,7 @@ pub async fn full_user_deactivate(
.await;
}
services
.users
.all_profile_keys(user_id)
.ready_for_each(|(profile_key, _)| {
services.users.set_profile_key(user_id, &profile_key, None);
})
.await;
services.users.clear_profile(user_id).await;
services
.pusher
@@ -372,62 +361,49 @@ pub async fn full_user_deactivate(
// TODO: Rescind all user invites
let mut pdu_queue: Vec<(PduBuilder, &OwnedRoomId)> = Vec::new();
let mut pdu_queue: Vec<(PartialPdu, &OwnedRoomId)> = Vec::new();
for room_id in all_joined_rooms {
let room_power_levels = services
.rooms
.state_accessor
.room_state_get_content::<RoomPowerLevelsEventContent>(
room_id,
&StateEventType::RoomPowerLevels,
"",
)
.await
.ok();
.get_room_power_levels(room_id)
.await;
let user_can_demote_self =
room_power_levels
.as_ref()
.is_some_and(|power_levels_content| {
RoomPowerLevels::from(power_levels_content.clone())
.user_can_change_user_power_level(user_id, user_id)
}) || services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await
.is_ok_and(|event| event.sender() == user_id);
room_power_levels.user_can_change_user_power_level(user_id, user_id);
if user_can_demote_self {
let mut power_levels_content = room_power_levels.unwrap_or_default();
if user_can_demote_self
&& let Ok(mut power_levels_content) =
RoomPowerLevelsEventContent::try_from(room_power_levels)
{
power_levels_content.users.remove(user_id);
let pl_evt = PduBuilder::state(String::new(), &power_levels_content);
let pl_evt = PartialPdu::state(String::new(), &power_levels_content);
pdu_queue.push((pl_evt, room_id));
}
// Leave the room
pdu_queue.push((
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
avatar_url: None,
blurhash: None,
membership: MembershipState::Leave,
displayname: None,
join_authorized_via_users_server: None,
reason: None,
is_direct: None,
third_party_invite: None,
redact_events: None,
}),
PartialPdu::state(
user_id.to_string(),
&RoomMemberEventContent::new(MembershipState::Leave),
),
room_id,
));
// TODO: Redact all messages sent by the user in the room
}
super::update_all_rooms(services, pdu_queue, user_id)
.boxed()
.await;
for (pdu, room_id) in pdu_queue {
let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
let _ = services
.rooms
.timeline
.build_and_append_pdu(pdu, user_id, Some(room_id.as_ref()), &state_lock)
.await;
}
for room_id in all_joined_rooms {
services.rooms.state_cache.forget(room_id, user_id);
}
+15 -18
View File
@@ -20,7 +20,11 @@
},
uiaa::{AuthFlow, AuthType},
},
events::{GlobalAccountDataEventType, room::message::RoomMessageEventContent},
assign,
events::{
GlobalAccountDataEventType, push_rules::PushRulesEvent,
room::message::RoomMessageEventContent,
},
push,
};
use serde_json::value::RawValue;
@@ -209,23 +213,15 @@ pub(crate) async fn register_route(
None,
&user_id,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: push::Ruleset::server_default(&user_id),
},
})?,
&serde_json::to_value(PushRulesEvent::new(
push::Ruleset::server_default(&user_id).into(),
))
.expect("should be able to serialize push rules"),
)
.await?;
// Generate new device id if the user didn't specify one
let no_device = body.inhibit_login
|| body
.appservice_info
.as_ref()
.is_some_and(|aps| aps.registration.device_management);
let (token, device) = if !no_device {
// Don't create a device for inhibited logins
let (token, device) = if !body.inhibit_login {
let device_id = if is_guest { None } else { body.device_id.clone() }
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
@@ -243,12 +239,14 @@ pub(crate) async fn register_route(
Some(client.to_string()),
)
.await?;
debug_info!(%user_id, %device_id, "User account was created");
(Some(new_token), Some(device_id))
} else {
// Don't create a device for inhibited logins
(None, None)
};
debug_info!(%user_id, ?device, "User account was created");
// If the user registered with an email, associate it with their account.
if let Some(identity) = identity
&& let Some(email) = identity.email
@@ -393,13 +391,12 @@ pub(crate) async fn register_route(
}
}
Ok(register::v3::Response {
Ok(assign!(register::v3::Response::new(user_id), {
access_token: token,
user_id,
device_id: device,
refresh_token: None,
expires_in: None,
})
}))
}
/// Determine which flows and parameters should be presented when
+2 -6
View File
@@ -141,9 +141,7 @@ pub(crate) async fn delete_3pid_route(
let sender_user = body.sender_user();
if body.medium != Medium::Email {
return Ok(delete_3pid::v3::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
});
return Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::NoSupport));
}
if !services.threepid.email_requirement().may_remove() {
@@ -159,7 +157,5 @@ pub(crate) async fn delete_3pid_route(
return Err!(Request(ThreepidNotFound("Your account has no associated email.")));
}
Ok(delete_3pid::v3::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
})
Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
}
+6 -9
View File
@@ -7,10 +7,7 @@
get_global_account_data, get_room_account_data, set_global_account_data,
set_room_account_data,
},
events::{
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
RoomAccountDataEventType,
},
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
serde::Raw,
};
use serde::Deserialize;
@@ -40,7 +37,7 @@ pub(crate) async fn set_global_account_data_route(
)
.await?;
Ok(set_global_account_data::v3::Response {})
Ok(set_global_account_data::v3::Response::new())
}
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
@@ -65,7 +62,7 @@ pub(crate) async fn set_room_account_data_route(
)
.await?;
Ok(set_room_account_data::v3::Response {})
Ok(set_room_account_data::v3::Response::new())
}
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
@@ -87,7 +84,7 @@ pub(crate) async fn get_global_account_data_route(
.await
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
Ok(get_global_account_data::v3::Response { account_data: account_data.content })
Ok(get_global_account_data::v3::Response::new(account_data.content))
}
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
@@ -109,7 +106,7 @@ pub(crate) async fn get_room_account_data_route(
.await
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
Ok(get_room_account_data::v3::Response { account_data: account_data.content })
Ok(get_room_account_data::v3::Response::new(account_data.content))
}
async fn set_account_data(
@@ -119,7 +116,7 @@ async fn set_account_data(
event_type_s: &str,
data: &RawJsonValue,
) -> Result {
if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() {
if event_type_s == "m.fully_read" {
return Err!(Request(BadJson(
"This endpoint cannot be used for marking a room as fully read (setting \
m.fully_read)"
+1 -1
View File
@@ -1,7 +1,7 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::future::{join, join3};
use ruma::api::client::admin::{get_suspended, set_suspended};
use ruminuwuity::admin::{get_suspended, set_suspended};
use crate::Ruma;
+7 -4
View File
@@ -1,6 +1,9 @@
use axum::extract::State;
use conduwuit::{Err, Result, err};
use ruma::api::{appservice::ping, client::appservice::request_ping};
use ruma::{
api::{appservice::ping, client::appservice::request_ping},
assign,
};
use crate::Ruma;
@@ -40,12 +43,12 @@ pub(crate) async fn appservice_ping(
.sending
.send_appservice_request(
appservice_info.registration.clone(),
ping::send_ping::v1::Request {
assign!(ping::send_ping::v1::Request::new(), {
transaction_id: body.transaction_id.clone(),
},
}),
)
.await?
.expect("We already validated if an appservice URL exists above");
Ok(request_ping::v1::Response { duration: timer.elapsed() })
Ok(request_ping::v1::Response::new(timer.elapsed()))
}
+29 -33
View File
@@ -3,7 +3,6 @@
use axum::extract::State;
use conduwuit::{Err, Result, err};
use conduwuit_service::Services;
use futures::{FutureExt, future::try_join};
use ruma::{
UInt, UserId,
api::client::backup::{
@@ -28,7 +27,7 @@ pub(crate) async fn create_backup_version_route(
.key_backups
.create_backup(body.sender_user(), &body.algorithm)?;
Ok(create_backup_version::v3::Response { version })
Ok(create_backup_version::v3::Response::new(version))
}
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
@@ -44,7 +43,7 @@ pub(crate) async fn update_backup_version_route(
.update_backup(body.sender_user(), &body.version, &body.algorithm)
.await?;
Ok(update_backup_version::v3::Response {})
Ok(update_backup_version::v3::Response::new())
}
/// # `GET /_matrix/client/r0/room_keys/version`
@@ -60,9 +59,9 @@ pub(crate) async fn get_latest_backup_info_route(
.await
.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await;
Ok(get_latest_backup_info::v3::Response { algorithm, count, etag, version })
Ok(get_latest_backup_info::v3::Response::new(algorithm, count, etag, version))
}
/// # `GET /_matrix/client/v3/room_keys/version/{version}`
@@ -80,14 +79,9 @@ pub(crate) async fn get_backup_info_route(
err!(Request(NotFound("Key backup does not exist at version {:?}", body.version)))
})?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(get_backup_info::v3::Response {
algorithm,
count,
etag,
version: body.version.clone(),
})
Ok(get_backup_info::v3::Response::new(algorithm, count, etag, body.version.clone()))
}
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
@@ -105,7 +99,7 @@ pub(crate) async fn delete_backup_version_route(
.delete_backup(body.sender_user(), &body.version)
.await;
Ok(delete_backup_version::v3::Response {})
Ok(delete_backup_version::v3::Response::new())
}
/// # `PUT /_matrix/client/r0/room_keys/keys`
@@ -140,9 +134,9 @@ pub(crate) async fn add_backup_keys_route(
}
}
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(add_backup_keys::v3::Response { count, etag })
Ok(add_backup_keys::v3::Response::new(etag, count))
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -175,9 +169,9 @@ pub(crate) async fn add_backup_keys_for_room_route(
.await?;
}
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(add_backup_keys_for_room::v3::Response { count, etag })
Ok(add_backup_keys_for_room::v3::Response::new(etag, count))
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -275,9 +269,9 @@ pub(crate) async fn add_backup_keys_for_session_route(
.await?;
}
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(add_backup_keys_for_session::v3::Response { count, etag })
Ok(add_backup_keys_for_session::v3::Response::new(etag, count))
}
/// # `GET /_matrix/client/r0/room_keys/keys`
@@ -292,7 +286,7 @@ pub(crate) async fn get_backup_keys_route(
.get_all(body.sender_user(), &body.version)
.await;
Ok(get_backup_keys::v3::Response { rooms })
Ok(get_backup_keys::v3::Response::new(rooms))
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -307,7 +301,7 @@ pub(crate) async fn get_backup_keys_for_room_route(
.get_room(body.sender_user(), &body.version, &body.room_id)
.await;
Ok(get_backup_keys_for_room::v3::Response { sessions })
Ok(get_backup_keys_for_room::v3::Response::new(sessions))
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -325,7 +319,7 @@ pub(crate) async fn get_backup_keys_for_session_route(
err!(Request(NotFound(debug_error!("Backup key not found for this user's session."))))
})?;
Ok(get_backup_keys_for_session::v3::Response { key_data })
Ok(get_backup_keys_for_session::v3::Response::new(key_data))
}
/// # `DELETE /_matrix/client/r0/room_keys/keys`
@@ -340,9 +334,9 @@ pub(crate) async fn delete_backup_keys_route(
.delete_all_keys(body.sender_user(), &body.version)
.await;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(delete_backup_keys::v3::Response { count, etag })
Ok(delete_backup_keys::v3::Response::new(etag, count))
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -357,9 +351,9 @@ pub(crate) async fn delete_backup_keys_for_room_route(
.delete_room_keys(body.sender_user(), &body.version, &body.room_id)
.await;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(delete_backup_keys_for_room::v3::Response { count, etag })
Ok(delete_backup_keys_for_room::v3::Response::new(etag, count))
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -374,22 +368,24 @@ pub(crate) async fn delete_backup_keys_for_session_route(
.delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id)
.await;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
Ok(delete_backup_keys_for_session::v3::Response { count, etag })
Ok(delete_backup_keys_for_session::v3::Response::new(etag, count))
}
async fn get_count_etag(
services: &Services,
sender_user: &UserId,
version: &str,
) -> Result<(UInt, String)> {
let count = services
) -> (UInt, String) {
let count: UInt = services
.key_backups
.count_keys(sender_user, version)
.map(TryInto::try_into);
.await
.try_into()
.expect("number of keys should fit into a UInt");
let etag = services.key_backups.get_etag(sender_user, version).map(Ok);
let etag = services.key_backups.get_etag(sender_user, version).await;
Ok(try_join(count, etag).await?)
(count, etag)
}
+13 -12
View File
@@ -5,8 +5,11 @@
use ruma::{
RoomVersionId,
api::client::discovery::get_capabilities::{
self, Capabilities, GetLoginTokenCapability, RoomVersionStability,
RoomVersionsCapability, ThirdPartyIdChangesCapability,
self,
v3::{
Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability,
ThirdPartyIdChangesCapability,
},
},
};
use serde_json::json;
@@ -25,19 +28,17 @@ pub(crate) async fn get_capabilities_route(
Server::available_room_versions().collect();
let mut capabilities = Capabilities::default();
capabilities.room_versions = RoomVersionsCapability {
capabilities.room_versions = RoomVersionsCapability::new(
services.server.config.default_room_version.clone(),
available,
default: services.server.config.default_room_version.clone(),
};
);
// Only allow 3pid changes if SMTP is configured
capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability {
enabled: services.threepid.email_requirement().may_change(),
};
capabilities.thirdparty_id_changes =
ThirdPartyIdChangesCapability::new(services.threepid.email_requirement().may_change());
capabilities.get_login_token = GetLoginTokenCapability {
enabled: services.server.config.login_via_existing_session,
};
capabilities.get_login_token =
GetLoginTokenCapability::new(services.server.config.login_via_existing_session);
// MSC4133 capability
capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?;
@@ -56,5 +57,5 @@ pub(crate) async fn get_capabilities_route(
capabilities.set("uk.timedout.msc4323", json!({"suspend": true, "lock": false}))?;
}
Ok(get_capabilities::v3::Response { capabilities })
Ok(get_capabilities::v3::Response::new(capabilities))
}
+5 -3
View File
@@ -12,7 +12,9 @@
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
future::{OptionFuture, join, join3, try_join3},
};
use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType};
use ruma::{
OwnedEventId, UserId, api::client::context::get_context, assign, events::StateEventType,
};
use crate::{
Ruma,
@@ -213,7 +215,7 @@ pub(crate) async fn get_context_route(
.collect()
.await;
Ok(get_context::v3::Response {
Ok(assign!(get_context::v3::Response::new(), {
event: base_event.map(at!(1)).map(Event::into_format),
start: events_before
@@ -243,5 +245,5 @@ pub(crate) async fn get_context_route(
.collect(),
state,
})
}))
}
+13 -13
View File
@@ -2,10 +2,14 @@
use axum_client_ip::ClientIp;
use conduwuit::{Err, Result, at};
use futures::StreamExt;
use ruma::api::client::dehydrated_device::{
delete_dehydrated_device::unstable as delete_dehydrated_device,
get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
put_dehydrated_device::unstable as put_dehydrated_device,
use ruma::{
api::client::dehydrated_device::{
delete_dehydrated_device::unstable as delete_dehydrated_device,
get_dehydrated_device::unstable as get_dehydrated_device,
get_events::unstable as get_events,
put_dehydrated_device::unstable as put_dehydrated_device,
},
assign,
};
use crate::Ruma;
@@ -33,7 +37,7 @@ pub(crate) async fn put_dehydrated_device_route(
.set_dehydrated_device(sender_user, body.body)
.await?;
Ok(put_dehydrated_device::Response { device_id })
Ok(put_dehydrated_device::Response::new(device_id))
}
/// # `DELETE /_matrix/client/../dehydrated_device`
@@ -51,7 +55,7 @@ pub(crate) async fn delete_dehydrated_device_route(
services.users.remove_device(sender_user, &device_id).await;
Ok(delete_dehydrated_device::Response { device_id })
Ok(delete_dehydrated_device::Response::new(device_id))
}
/// # `GET /_matrix/client/../dehydrated_device`
@@ -67,10 +71,7 @@ pub(crate) async fn get_dehydrated_device_route(
let device = services.users.get_dehydrated_device(sender_user).await?;
Ok(get_dehydrated_device::Response {
device_id: device.device_id,
device_data: device.device_data,
})
Ok(get_dehydrated_device::Response::new(device.device_id, device.device_data))
}
/// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
@@ -114,8 +115,7 @@ pub(crate) async fn get_dehydrated_events_route(
.collect()
.await;
Ok(get_events::Response {
events,
Ok(assign!(get_events::Response::new(events), {
next_batch: next_batch.as_ref().map(ToString::to_string),
})
}))
}
+23 -46
View File
@@ -25,7 +25,7 @@ pub(crate) async fn get_devices_route(
.collect()
.await;
Ok(get_devices::v3::Response { devices })
Ok(get_devices::v3::Response::new(devices))
}
/// # `GET /_matrix/client/r0/devices/{deviceId}`
@@ -41,7 +41,7 @@ pub(crate) async fn get_device_route(
.await
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
Ok(get_device::v3::Response { device })
Ok(get_device::v3::Response::new(device))
}
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
@@ -73,19 +73,16 @@ pub(crate) async fn update_device_route(
.update_device_metadata(sender_user, &body.device_id, &device)
.await?;
Ok(update_device::v3::Response {})
Ok(update_device::v3::Response::new())
},
| Err(_) => {
let Some(appservice) = appservice else {
return Err!(Request(NotFound("Device not found.")));
};
if !appservice.registration.device_management {
return Err!(Request(NotFound("Device not found.")));
}
debug!(
"Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \
and device ID does not exist",
"Creating new device for {sender_user} from appservice {} as device ID does not \
exist",
appservice.registration.id
);
@@ -102,7 +99,7 @@ pub(crate) async fn update_device_route(
)
.await?;
return Ok(update_device::v3::Response {});
return Ok(update_device::v3::Response::new());
},
}
}
@@ -124,39 +121,28 @@ pub(crate) async fn delete_device_route(
let sender_user = body.sender_user();
let appservice = body.appservice_info.as_ref();
if appservice.is_some_and(|appservice| appservice.registration.device_management) {
debug!(
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
enabled"
);
services
.users
.remove_device(sender_user, &body.device_id)
.await;
return Ok(delete_device::v3::Response {});
// Appservices get to skip UIAA for this endpoint
if appservice.is_none() {
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
}
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
services
.users
.remove_device(sender_user, &body.device_id)
.await;
Ok(delete_device::v3::Response {})
Ok(delete_device::v3::Response::new())
}
/// # `POST /_matrix/client/v3/delete_devices`
///
/// Deletes the given list of devices.
///
/// - Requires UIAA to verify user password unless from an appservice with
/// MSC4190 enabled.
/// - Requires UIAA to verify user password.
///
/// For each device:
/// - Invalidates access token
@@ -171,27 +157,18 @@ pub(crate) async fn delete_devices_route(
let sender_user = body.sender_user();
let appservice = body.appservice_info.as_ref();
if appservice.is_some_and(|appservice| appservice.registration.device_management) {
debug!(
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
enabled"
);
for device_id in &body.devices {
services.users.remove_device(sender_user, device_id).await;
}
return Ok(delete_devices::v3::Response {});
// Appservices get to skip UIAA for this endpoint
if appservice.is_none() {
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
}
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
for device_id in &body.devices {
services.users.remove_device(sender_user, device_id).await;
}
Ok(delete_devices::v3::Response {})
Ok(delete_devices::v3::Response::new())
}
+50 -140
View File
@@ -1,23 +1,16 @@
use std::iter::once;
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Event, Result, RoomVersion, err, info,
Err, Result, err, info,
utils::{
TryFutureExtExt,
math::Expected,
result::FlatOk,
stream::{ReadyExt, WidebandExt},
},
};
use conduwuit_service::Services;
use futures::{
FutureExt, StreamExt, TryFutureExt,
future::{join, join4, join5},
};
use futures::StreamExt;
use ruma::{
OwnedRoomId, RoomId, ServerName, UInt, UserId,
RoomId, ServerName, UInt, UserId,
api::{
client::{
directory::{
@@ -28,15 +21,9 @@
},
federation,
},
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
events::{
StateEventType,
room::{
create::RoomCreateEventContent,
join_rules::{JoinRule, RoomJoinRulesEventContent},
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
},
assign,
directory::{Filter, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
events::StateEventType,
uint,
};
use tokio::join;
@@ -109,12 +96,11 @@ pub(crate) async fn get_public_rooms_route(
err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
})?;
Ok(get_public_rooms::v3::Response {
chunk: response.chunk,
Ok(assign!(get_public_rooms::v3::Response::new(response.chunk), {
prev_batch: response.prev_batch,
next_batch: response.next_batch,
total_room_count_estimate: response.total_room_count_estimate,
})
}))
}
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
@@ -197,7 +183,7 @@ pub(crate) async fn set_room_visibility_route(
},
}
Ok(set_room_visibility::v3::Response {})
Ok(set_room_visibility::v3::Response::new())
}
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
@@ -212,13 +198,13 @@ pub(crate) async fn get_room_visibility_route(
return Err!(Request(NotFound("Room not found")));
}
Ok(get_room_visibility::v3::Response {
visibility: if services.rooms.directory.is_public_room(&body.room_id).await {
room::Visibility::Public
} else {
room::Visibility::Private
},
})
let visibility = if services.rooms.directory.is_public_room(&body.room_id).await {
room::Visibility::Public
} else {
room::Visibility::Private
};
Ok(get_room_visibility::v3::Response::new(visibility))
}
pub(crate) async fn get_public_rooms_filtered_helper(
@@ -236,24 +222,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.sending
.send_federation_request(
other_server,
federation::directory::get_public_rooms_filtered::v1::Request {
assign!(federation::directory::get_public_rooms_filtered::v1::Request::new(), {
limit,
since: since.map(ToOwned::to_owned),
filter: Filter {
filter: assign!(Filter::new(), {
generic_search_term: filter.generic_search_term.clone(),
room_types: filter.room_types.clone(),
},
}),
room_network: RoomNetwork::Matrix,
},
}),
)
.await?;
return Ok(get_public_rooms_filtered::v3::Response {
return Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
chunk: response.chunk,
prev_batch: response.prev_batch,
next_batch: response.next_batch,
total_room_count_estimate: response.total_room_count_estimate,
});
}));
}
// Use limit or else 10, with maximum 100
@@ -284,16 +270,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.rooms
.directory
.public_rooms()
.map(ToOwned::to_owned)
.wide_then(|room_id| public_rooms_chunk(services, room_id))
.ready_filter_map(|chunk| {
.wide_then(async |room_id| {
let summary = services
.rooms
.summary
.build_local_room_summary(&room_id)
.await
.expect("room in public room directory should exist");
summary.into()
})
.ready_filter_map(|chunk: PublicRoomsChunk| {
if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) {
return None;
}
if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {
if name.to_lowercase().contains(&query) {
return Some(chunk);
}
}
@@ -320,7 +314,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.collect()
.await;
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
all_rooms.sort_by_key(|r| std::cmp::Reverse(r.num_joined_members));
let total_room_count_estimate = UInt::try_from(all_rooms.len())
.unwrap_or_else(|_| uint!(0))
@@ -335,12 +329,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.ge(&limit)
.then_some(format!("n{}", num_since.expected_add(limit)));
Ok(get_public_rooms_filtered::v3::Response {
Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
chunk,
prev_batch,
next_batch,
total_room_count_estimate,
})
}))
}
/// Checks whether the given user ID is allowed to publish the target room to
@@ -356,109 +350,25 @@ async fn user_can_publish_room(
// Server admins can always publish to their own room directory.
return Ok(true);
}
let (create_event, room_version, power_levels_content) = join!(
services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, ""),
let (room_version, room_creators, power_levels) = join!(
services.rooms.state.get_room_version(room_id),
services
.rooms
.state_accessor
.room_state_get_content::<RoomPowerLevelsEventContent>(
room_id,
&StateEventType::RoomPowerLevels,
""
)
services.rooms.state_accessor.get_room_creators(room_id),
services.rooms.state_accessor.get_room_power_levels(room_id),
);
let room_version = room_version
.as_ref()
.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
let create_event = create_event.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
if RoomVersion::new(room_version)
.expect("room version must be supported")
let room_version_rules = room_version.rules().unwrap();
if room_version_rules
.authorization
.explicitly_privilege_room_creators
&& room_creators.contains(user_id)
{
let create_content: RoomCreateEventContent =
serde_json::from_str(create_event.content().get())
.map_err(|_| err!(Database("Invalid event content for m.room.create")))?;
let is_creator = create_content
.additional_creators
.unwrap_or_default()
.into_iter()
.chain(once(create_event.sender().to_owned()))
.any(|sender| sender == user_id);
if is_creator {
return Ok(true);
}
}
match power_levels_content.map(RoomPowerLevels::from) {
| Ok(pl) => Ok(pl.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)),
| Err(e) =>
if e.is_not_found() {
Ok(create_event.sender() == user_id)
} else {
Err!(Database("Invalid event content for m.room.power_levels: {e}"))
},
}
}
async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
let name = services.rooms.state_accessor.get_name(&room_id).ok();
let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok();
let canonical_alias = services
.rooms
.state_accessor
.get_canonical_alias(&room_id)
.ok();
let avatar_url = services.rooms.state_accessor.get_avatar(&room_id);
let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok();
let world_readable = services.rooms.state_accessor.is_world_readable(&room_id);
let join_rule = services
.rooms
.state_accessor
.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")
.map_ok(|c: RoomJoinRulesEventContent| match c.join_rule {
| JoinRule::Public => PublicRoomJoinRule::Public,
| JoinRule::Knock => "knock".into(),
| JoinRule::KnockRestricted(_) => "knock_restricted".into(),
| _ => "invite".into(),
});
let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id);
let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id);
let (
(avatar_url, canonical_alias, guest_can_join, join_rule, name),
(num_joined_members, room_type, topic, world_readable),
) = join(
join5(avatar_url, canonical_alias, guest_can_join, join_rule, name),
join4(num_joined_members, room_type, topic, world_readable),
)
.boxed()
.await;
PublicRoomsChunk {
avatar_url: avatar_url.into_option().unwrap_or_default().url,
canonical_alias,
guest_can_join,
join_rule: join_rule.unwrap_or_default(),
name,
num_joined_members: num_joined_members
.map(TryInto::try_into)
.map(Result::ok)
.flat_ok()
.unwrap_or_else(|| uint!(0)),
room_id,
room_type,
topic,
world_readable,
return Ok(true);
}
Ok(power_levels.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias))
}
+78 -118
View File
@@ -5,25 +5,23 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Result, debug, debug_warn, err,
result::NotFound,
utils::{IterStream, stream::WidebandExt},
Err, Result, debug, debug_warn, err,
result::FlatOk,
utils::{IterStream, TryFutureExtExt, stream::WidebandExt},
};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
api::{
client::{
error::ErrorKind,
keys::{
claim_keys, get_key_changes, get_keys, upload_keys,
upload_signatures::{self},
upload_signing_keys,
},
client::keys::{
claim_keys, get_key_changes, get_keys, upload_keys,
upload_signatures::{self},
upload_signing_keys,
},
federation,
},
assign,
encryption::CrossSigningKey,
serde::Raw,
};
@@ -115,12 +113,12 @@ pub(crate) async fn upload_keys_route(
}
}
Ok(upload_keys::v3::Response {
one_time_key_counts: services
.users
.count_one_time_keys(sender_user, sender_device)
.await,
})
let one_time_key_counts = services
.users
.count_one_time_keys(sender_user, sender_device)
.await;
Ok(upload_keys::v3::Response::new(one_time_key_counts))
}
/// # `POST /_matrix/client/r0/keys/query`
@@ -174,7 +172,7 @@ pub(crate) async fn upload_signing_keys_route(
) -> Result<upload_signing_keys::v3::Response> {
let sender_user = body.sender_user();
match check_for_new_keys(
if uiaa_needed_to_upload_keys(
services,
sender_user,
body.self_signing_key.as_ref(),
@@ -182,25 +180,11 @@ pub(crate) async fn upload_signing_keys_route(
body.master_key.as_ref(),
)
.await
.inspect_err(|e| debug!(?e))
{
| Ok(exists) => {
if let Some(result) = exists {
// No-op, they tried to reupload the same set of keys
// (lost connection for example)
return Ok(result);
}
debug!(
"Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"
);
// Some of the keys weren't found, so we let them upload
},
| _ => {
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
},
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.await?;
}
services
@@ -214,77 +198,56 @@ pub(crate) async fn upload_signing_keys_route(
)
.await?;
Ok(upload_signing_keys::v3::Response {})
Ok(upload_signing_keys::v3::Response::new())
}
async fn check_for_new_keys(
async fn uiaa_needed_to_upload_keys(
services: crate::State,
user_id: &UserId,
self_signing_key: Option<&Raw<CrossSigningKey>>,
user_signing_key: Option<&Raw<CrossSigningKey>>,
master_signing_key: Option<&Raw<CrossSigningKey>>,
) -> Result<Option<upload_signing_keys::v3::Response>> {
debug!("checking for existing keys");
let mut empty = false;
if let Some(master_signing_key) = master_signing_key {
let (key, value) = parse_master_key(user_id, master_signing_key)?;
let result = services
.users
.get_master_key(None, user_id, &|_| true)
.await;
if result.is_not_found() {
empty = true;
} else {
let existing_master_key = result?;
let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?;
if existing_key != key || existing_value != value {
return Err!(Request(Forbidden(
"Tried to change an existing master key, UIA required"
)));
}
}
}
if let Some(user_signing_key) = user_signing_key {
let key = services.users.get_user_signing_key(user_id).await;
if key.is_not_found() && !empty {
return Err!(Request(Forbidden(
"Tried to update an existing user signing key, UIA required"
)));
}
if !key.is_not_found() {
let existing_signing_key = key?.deserialize()?;
if existing_signing_key != user_signing_key.deserialize()? {
return Err!(Request(Forbidden(
"Tried to change an existing user signing key, UIA required"
)));
}
}
}
if let Some(self_signing_key) = self_signing_key {
let key = services
) -> bool {
let (self_signing_key, user_signing_key, master_signing_key) = (
self_signing_key.map(Raw::deserialize).flat_ok(),
user_signing_key.map(Raw::deserialize).flat_ok(),
master_signing_key.map(Raw::deserialize).flat_ok(),
);
let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = futures::join!(
services
.users
.get_self_signing_key(None, user_id, &|_| true)
.await;
if key.is_not_found() && !empty {
debug!(?key);
return Err!(Request(Forbidden(
"Tried to add a new signing key independently from the master key"
)));
}
if !key.is_not_found() {
let existing_signing_key = key?.deserialize()?;
if existing_signing_key != self_signing_key.deserialize()? {
return Err!(Request(Forbidden(
"Tried to update an existing self signing key, UIA required"
)));
}
}
}
if empty {
return Ok(None);
}
.ok(),
services.users.get_user_signing_key(user_id).ok(),
services.users.get_master_key(None, user_id, &|_| true).ok(),
);
Ok(Some(upload_signing_keys::v3::Response {}))
let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = (
existing_self_signing_key
.as_ref()
.map(Raw::deserialize)
.flat_ok(),
existing_user_signing_key
.as_ref()
.map(Raw::deserialize)
.flat_ok(),
existing_master_signing_key
.as_ref()
.map(Raw::deserialize)
.flat_ok(),
);
if let Some(existing_master_signing_key) = existing_master_signing_key {
// If a master key exists, UIAA is required if any of the keys are different.
master_signing_key != Some(existing_master_signing_key)
|| user_signing_key != existing_user_signing_key
|| self_signing_key != existing_self_signing_key
} else {
// If no master key exists, UIAA is not required.
false
}
}
/// # `POST /_matrix/client/r0/keys/signatures/upload`
@@ -343,7 +306,7 @@ pub(crate) async fn upload_signatures_route(
}
}
Ok(upload_signatures::v3::Response { failures: BTreeMap::new() })
Ok(upload_signatures::v3::Response::new())
}
/// # `POST /_matrix/client/r0/keys/changes`
@@ -363,18 +326,17 @@ pub(crate) async fn get_key_changes_route(
let from = body
.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?;
.map_err(|_| err!(Request(InvalidParam("Invalid `from`."))))?;
let to = body
.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?;
.map_err(|_| err!(Request(InvalidParam("Invalid `to`."))))?;
device_list_updates.extend(
services
.users
.keys_changed(sender_user, Some(from), Some(to))
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
@@ -385,18 +347,18 @@ pub(crate) async fn get_key_changes_route(
device_list_updates.extend(
services
.users
.room_keys_changed(room_id, Some(from), Some(to))
.room_keys_changed(&room_id, Some(from), Some(to))
.map(|(user_id, _)| user_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
}
Ok(get_key_changes::v3::Response {
changed: device_list_updates.into_iter().collect(),
left: Vec::new(), // TODO
})
Ok(get_key_changes::v3::Response::new(
device_list_updates.into_iter().collect(),
// TODO
vec![],
))
}
pub(crate) async fn get_keys_helper<F>(
@@ -433,10 +395,10 @@ pub(crate) async fn get_keys_helper<F>(
let mut devices = services.users.all_device_ids(user_id).boxed();
while let Some(device_id) = devices.next().await {
if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await {
if let Ok(mut keys) = services.users.get_device_keys(user_id, &device_id).await {
let metadata = services
.users
.get_device_metadata(user_id, device_id)
.get_device_metadata(user_id, &device_id)
.await
.map_err(|_| {
err!(Database("all_device_keys contained nonexistent device."))
@@ -445,7 +407,7 @@ pub(crate) async fn get_keys_helper<F>(
add_unsigned_device_display_name(&mut keys, metadata, include_display_names)
.map_err(|_| err!(Database("invalid device keys in database")))?;
container.insert(device_id.to_owned(), keys);
container.insert(device_id.clone(), keys);
}
}
@@ -506,8 +468,7 @@ pub(crate) async fn get_keys_helper<F>(
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
let request =
federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
let request = federation::keys::get_keys::v1::Request::new(device_keys_input_fed);
let response = tokio::time::timeout(
timeout,
services.sending.send_federation_request(server, request),
@@ -561,13 +522,13 @@ pub(crate) async fn get_keys_helper<F>(
}
}
Ok(get_keys::v3::Response {
Ok(assign!(get_keys::v3::Response::new(), {
failures,
device_keys,
master_keys,
self_signing_keys,
user_signing_keys,
})
}))
}
fn add_unsigned_device_display_name(
@@ -576,7 +537,8 @@ fn add_unsigned_device_display_name(
include_display_names: bool,
) -> serde_json::Result<()> {
if let Some(display_name) = metadata.display_name {
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
let mut object =
keys.deserialize_as_unchecked::<serde_json::Map<String, serde_json::Value>>()?;
let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
if let serde_json::Value::Object(unsigned_object) = unsigned {
@@ -642,9 +604,7 @@ pub(crate) async fn claim_keys_helper(
timeout,
services.sending.send_federation_request(
server,
federation::keys::claim_keys::v1::Request {
one_time_keys: one_time_keys_input_fed,
},
federation::keys::claim_keys::v1::Request::new(one_time_keys_input_fed),
),
)
.await
@@ -667,5 +627,5 @@ pub(crate) async fn claim_keys_helper(
}
}
Ok(claim_keys::v3::Response { failures, one_time_keys })
Ok(assign!(claim_keys::v3::Response::new(one_time_keys), { failures: failures }))
}
+36 -81
View File
@@ -9,11 +9,11 @@
use conduwuit_core::error;
use conduwuit_service::{
Services,
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
media::{Dim, FileMeta, MXC_LENGTH},
};
use reqwest::Url;
use ruma::{
Mxc, UserId,
UserId,
api::client::{
authenticated_media::{
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
@@ -21,7 +21,9 @@
},
media::create_content,
},
assign,
};
use service::media::mxc::Mxc;
use crate::Ruma;
@@ -30,9 +32,9 @@ pub(crate) async fn get_media_config_route(
State(services): State<crate::State>,
_body: Ruma<get_media_config::v1::Request>,
) -> Result<get_media_config::v1::Response> {
Ok(get_media_config::v1::Response {
upload_size: ruma_from_usize(services.server.config.max_request_size),
})
Ok(get_media_config::v1::Response::new(ruma_from_usize(
services.server.config.max_request_size,
)))
}
/// # `POST /_matrix/media/v3/upload`
@@ -82,10 +84,9 @@ pub(crate) async fn create_content_route(
.flatten()
});
Ok(create_content::v3::Response {
content_uri: mxc.to_string().into(),
Ok(assign!(create_content::v3::Response::new(mxc.to_string().into()), {
blurhash: blurhash.flatten(),
})
}))
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
@@ -114,7 +115,7 @@ pub(crate) async fn get_content_thumbnail_route(
content,
content_type,
content_disposition,
} = match fetch_thumbnail(&services, &mxc, user, body.timeout_ms, &dim).await {
} = match fetch_thumbnail_meta(&services, &mxc, user, body.timeout_ms, &dim).await {
| Ok(meta) => meta,
| Err(conduwuit::Error::Io(e)) => match e.kind() {
| std::io::ErrorKind::NotFound =>
@@ -128,13 +129,14 @@ pub(crate) async fn get_content_thumbnail_route(
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching thumbnail."))),
};
Ok(get_content_thumbnail::v1::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
let content_disposition =
make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
Ok(get_content_thumbnail::v1::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
})
))
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
@@ -161,7 +163,7 @@ pub(crate) async fn get_content_route(
content,
content_type,
content_disposition,
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
| Ok(meta) => meta,
| Err(conduwuit::Error::Io(e)) => match e.kind() {
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
@@ -174,13 +176,14 @@ pub(crate) async fn get_content_route(
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
};
Ok(get_content::v1::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
let content_disposition =
make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
Ok(get_content::v1::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
})
))
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
@@ -208,7 +211,7 @@ pub(crate) async fn get_content_as_filename_route(
content,
content_type,
content_disposition,
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
| Ok(meta) => meta,
| Err(conduwuit::Error::Io(e)) => match e.kind() {
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
@@ -221,13 +224,17 @@ pub(crate) async fn get_content_as_filename_route(
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
};
Ok(get_content_as_filename::v1::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
let content_disposition = make_content_disposition(
content_disposition.as_ref(),
content_type.as_deref(),
Some(&body.filename),
);
Ok(get_content_as_filename::v1::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
})
))
}
/// # `GET /_matrix/client/v1/media/preview_url`
@@ -278,58 +285,6 @@ pub(crate) async fn get_media_preview_route(
})
}
async fn fetch_thumbnail(
services: &Services,
mxc: &Mxc<'_>,
user: &UserId,
timeout_ms: Duration,
dim: &Dim,
) -> Result<FileMeta> {
let FileMeta {
content,
content_type,
content_disposition,
} = fetch_thumbnail_meta(services, mxc, user, timeout_ms, dim).await?;
let content_disposition = Some(make_content_disposition(
content_disposition.as_ref(),
content_type.as_deref(),
None,
));
Ok(FileMeta {
content,
content_type,
content_disposition,
})
}
async fn fetch_file(
services: &Services,
mxc: &Mxc<'_>,
user: &UserId,
timeout_ms: Duration,
filename: Option<&str>,
) -> Result<FileMeta> {
let FileMeta {
content,
content_type,
content_disposition,
} = fetch_file_meta(services, mxc, user, timeout_ms).await?;
let content_disposition = Some(make_content_disposition(
content_disposition.as_ref(),
content_type.as_deref(),
filename,
));
Ok(FileMeta {
content,
content_type,
content_disposition,
})
}
async fn fetch_thumbnail_meta(
services: &Services,
mxc: &Mxc<'_>,
+65 -47
View File
@@ -6,15 +6,16 @@
Err, Result, err,
utils::{content_disposition::make_content_disposition, math::ruma_from_usize},
};
use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta};
use conduwuit_service::media::{CORP_CROSS_ORIGIN, Dim, FileMeta};
use reqwest::Url;
use ruma::{
Mxc,
api::client::media::{
create_content, get_content, get_content_as_filename, get_content_thumbnail,
get_media_config, get_media_preview,
},
assign,
};
use service::media::mxc::Mxc;
use crate::{Ruma, RumaResponse, client::create_content_route};
@@ -25,9 +26,9 @@ pub(crate) async fn get_media_config_legacy_route(
State(services): State<crate::State>,
_body: Ruma<get_media_config::v3::Request>,
) -> Result<get_media_config::v3::Response> {
Ok(get_media_config::v3::Response {
upload_size: ruma_from_usize(services.server.config.max_request_size),
})
Ok(get_media_config::v3::Response::new(ruma_from_usize(
services.server.config.max_request_size,
)))
}
/// # `GET /_matrix/media/v1/config`
@@ -153,13 +154,16 @@ pub(crate) async fn get_content_legacy_route(
None,
);
Ok(get_content::v3::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
content_disposition: Some(content_disposition),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
})
Ok(assign!(
get_content::v3::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
}
))
},
| _ =>
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -177,13 +181,16 @@ pub(crate) async fn get_content_legacy_route(
None,
);
Ok(get_content::v3::Response {
file: response.file,
content_type: response.content_type,
content_disposition: Some(content_disposition),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
})
Ok(assign!(
get_content::v3::Response::new(
response.file,
response.content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
}
))
} else {
Err!(Request(NotFound("Media not found.")))
},
@@ -244,13 +251,15 @@ pub(crate) async fn get_content_as_filename_legacy_route(
Some(&body.filename),
);
Ok(get_content_as_filename::v3::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
content_disposition: Some(content_disposition),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
})
Ok(assign!(get_content_as_filename::v3::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
}
))
},
| _ =>
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -268,13 +277,16 @@ pub(crate) async fn get_content_as_filename_legacy_route(
None,
);
Ok(get_content_as_filename::v3::Response {
content_disposition: Some(content_disposition),
content_type: response.content_type,
file: response.file,
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
})
Ok(assign!(
get_content_as_filename::v3::Response::new(
response.file,
response.content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
}
))
} else {
Err!(Request(NotFound("Media not found.")))
},
@@ -335,13 +347,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
None,
);
Ok(get_content_thumbnail::v3::Response {
file: content.expect("entire file contents"),
content_type: content_type.map(Into::into),
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
content_disposition: Some(content_disposition),
})
Ok(assign!(
get_content_thumbnail::v3::Response::new(
content.expect("entire file contents"),
content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
}
))
},
| _ =>
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -359,13 +374,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
None,
);
Ok(get_content_thumbnail::v3::Response {
file: response.file,
content_type: response.content_type,
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
content_disposition: Some(content_disposition),
})
Ok(assign!(
get_content_thumbnail::v3::Response::new(
response.file,
response.content_type.unwrap_or_default(),
content_disposition,
),
{
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
}
))
} else {
Err!(Request(NotFound("Media not found.")))
},
+10 -20
View File
@@ -1,7 +1,8 @@
use axum::extract::State;
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
use ruma::{
api::client::membership::ban_user,
assign,
events::room::member::{MembershipState, RoomMemberEventContent},
};
@@ -24,30 +25,19 @@ pub(crate) async fn ban_user_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let current_member_content = services
.rooms
.state_accessor
.get_member(&body.room_id, &body.user_id)
.await
.unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban));
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
membership: MembershipState::Ban,
reason: body.reason.clone(),
displayname: None, // display name may be offensive
avatar_url: None, // avatar may be offensive
is_direct: None,
join_authorized_via_users_server: None,
third_party_invite: None,
redact_events: body.redact_events,
..current_member_content
}),
PartialPdu::state(
body.user_id.to_string(),
&assign!(RoomMemberEventContent::new(MembershipState::Ban), {
reason: body.reason.clone(),
redact_events: body.redact_events,
}),
),
sender_user,
Some(&body.room_id),
&state_lock,
+59 -46
View File
@@ -2,18 +2,19 @@
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, debug_error, err, info,
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
matrix::{event::gen_event_id_canonical_json, pdu::PartialPdu},
warn,
};
use futures::FutureExt;
use ruma::{
RoomId, UserId,
api::{client::membership::invite_user, federation::membership::create_invite},
events::{
invite_permission_config::FilterLevel,
room::member::{MembershipState, RoomMemberEventContent},
api::{
client::membership::invite_user::{self, v3::InviteUserId},
federation::membership::create_invite,
},
events::room::member::{MembershipState, RoomMemberEventContent},
};
use ruminuwuity::invite_permission_config::FilterLevel;
use service::Services;
use super::banned_room_check;
@@ -51,7 +52,11 @@ pub(crate) async fn invite_user_route(
.await?;
match &body.recipient {
| invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
| invite_user::v3::InvitationRecipient::UserId(InviteUserId {
user_id: recipient_user,
reason,
..
}) => {
let sender_filter_level = services
.users
.invite_filter_level(recipient_user, sender_user)
@@ -59,7 +64,7 @@ pub(crate) async fn invite_user_route(
if !matches!(sender_filter_level, FilterLevel::Allow) {
// drop invites if the sender has the recipient filtered
return Ok(invite_user::v3::Response {});
return Ok(invite_user::v3::Response::new());
}
if let Ok(target_user_membership) = services
@@ -95,13 +100,13 @@ pub(crate) async fn invite_user_route(
sender_user,
recipient_user,
&body.room_id,
body.reason.clone(),
reason.clone(),
false,
)
.boxed()
.await?;
Ok(invite_user::v3::Response {})
Ok(invite_user::v3::Response::new())
},
| _ => {
Err!(Request(NotFound("User not found.")))
@@ -141,25 +146,28 @@ pub(crate) async fn invite_helper(
let (pdu, pdu_json, invite_room_state) = {
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
};
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
content.displayname = services.users.displayname(recipient_user).await.ok();
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
content.is_direct = Some(is_direct);
content.reason = reason;
let (pdu, pdu_json) = services
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(recipient_user.to_string(), &content),
PartialPdu::state(recipient_user.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
)
.await?;
let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
let invite_room_state = services
.rooms
.state
.summary_stripped(&pdu, room_id, recipient_user)
.await;
drop(state_lock);
@@ -168,32 +176,39 @@ pub(crate) async fn invite_helper(
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
let mut request = create_invite::v2::Request::new(
room_id.to_owned(),
(*pdu.event_id).to_owned(),
room_version_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(pdu_json.clone())
.await,
invite_room_state,
);
request.via = services
.rooms
.state_cache
.servers_route_via(room_id)
.await
.ok();
let response = services
.sending
.send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
room_id: room_id.to_owned(),
event_id: (*pdu.event_id).to_owned(),
room_version: room_version_id.clone(),
event: services
.sending
.convert_to_outgoing_federation_event(pdu_json.clone())
.await,
invite_room_state,
via: services
.rooms
.state_cache
.servers_route_via(room_id)
.await
.ok(),
})
.send_federation_request(recipient_user.server_name(), request)
.await?;
// We do not add the event_id field to the pdu here because of signature and
// hashes checks
let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id)
.map_err(|e| {
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
})?;
let (event_id, value) = gen_event_id_canonical_json(
&response.event,
&room_version_id
.rules()
.expect("room version should have defined rules"),
)
.map_err(|e| {
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
})?;
if pdu.event_id != event_id {
return Err!(Request(BadJson(warn!(
@@ -229,20 +244,18 @@ pub(crate) async fn invite_helper(
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
displayname: services.users.displayname(recipient_user).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
blurhash: services.users.blurhash(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
};
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
content.displayname = services.users.displayname(recipient_user).await.ok();
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
content.blurhash = services.users.blurhash(recipient_user).await.ok();
content.is_direct = Some(is_direct);
content.reason = reason;
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(recipient_user.to_string(), &content),
PartialPdu::state(recipient_user.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
+59 -71
View File
@@ -7,7 +7,7 @@
matrix::{
StateKey,
event::{gen_event_id, gen_event_id_canonical_json},
pdu::{PduBuilder, PduEvent},
pdu::{PartialPdu, PduEvent},
state_res,
},
result::FlatOk,
@@ -24,10 +24,8 @@
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId,
api::{
client::{
error::ErrorKind,
membership::{join_room_by_id, join_room_by_id_or_alias},
},
client::membership::{join_room_by_id, join_room_by_id_or_alias},
error::{ErrorKind, IncompatibleRoomVersionErrorData},
federation::{self},
},
canonical_json::to_canonical_value,
@@ -89,7 +87,6 @@ pub(crate) async fn join_room_by_id_route(
.rooms
.state_cache
.servers_invite_via(&body.room_id)
.map(ToOwned::to_owned)
.collect()
.await;
@@ -169,7 +166,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
@@ -210,11 +206,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
)
.await?;
let addl_via_servers = services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned);
let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
let addl_state_servers = services
.rooms
@@ -227,7 +219,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.iter()
.map(|event| event.get_field("sender"))
.filter_map(FlatOk::flat_ok)
.map(|user: &UserId| user.server_name().to_owned())
.map(|user: OwnedUserId| user.server_name().to_owned())
.stream()
.chain(addl_via_servers)
.collect()
@@ -254,7 +246,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.boxed()
.await?;
Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id })
Ok(join_room_by_id_or_alias::v3::Response::new(join_room_response.room_id))
}
pub async fn join_room_by_id_helper(
@@ -285,7 +277,7 @@ pub async fn join_room_by_id_helper(
.await
{
debug_warn!("{sender_user} is already joined in {room_id}");
return Ok(join_room_by_id::v3::Response { room_id: room_id.into() });
return Ok(join_room_by_id::v3::Response::new(room_id.to_owned()));
}
if let Err(e) = services
@@ -385,13 +377,14 @@ async fn join_room_by_id_helper_remote(
info!("make_join finished");
let room_version_id = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
let room_version = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
let room_version_rules = room_version
.rules()
.expect("room version should have defined rules");
if !services.server.supported_room_version(&room_version_id) {
if !services.server.supported_room_version(&room_version) {
// How did we get here?
return Err!(BadServerResponse(
"Remote room version {room_version_id} is not supported by conduwuit"
));
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
}
let mut join_event_stub: CanonicalJsonObject =
@@ -403,7 +396,7 @@ async fn join_room_by_id_helper_remote(
let join_authorized_via_users_server = {
use RoomVersionId::*;
if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
join_event_stub
.get("content")
.map(|s| {
@@ -425,36 +418,32 @@ async fn join_room_by_id_helper_remote(
.expect("Timestamp is valid js_int value"),
),
);
let mut join_content = RoomMemberEventContent::new(MembershipState::Join);
join_content.displayname = services.users.displayname(sender_user).await.ok();
join_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
join_content.blurhash = services.users.blurhash(sender_user).await.ok();
join_content.reason = reason;
join_content
.join_authorized_via_users_server
.clone_from(&join_authorized_via_users_server);
join_event_stub.insert(
"content".to_owned(),
to_canonical_value(RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
reason,
join_authorized_via_users_server: join_authorized_via_users_server.clone(),
..RoomMemberEventContent::new(MembershipState::Join)
})
.expect("event is valid, we just created it"),
to_canonical_value(join_content).expect("event is valid, we just created it"),
);
// We keep the "event_id" in the pdu only in v1 or
// v2 rooms
match room_version_id {
| RoomVersionId::V1 | RoomVersionId::V2 => {},
| _ => {
join_event_stub.remove("event_id");
},
}
// Remove event id if it exists
join_event_stub.remove("event_id");
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
services
.server_keys
.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
.hash_and_sign_event(&mut join_event_stub, &room_version_rules)?;
// Generate event id
let event_id = gen_event_id(&join_event_stub, &room_version_id)?;
let event_id = gen_event_id(&join_event_stub, &room_version_rules)?;
// Add event_id back
join_event_stub
@@ -464,15 +453,14 @@ async fn join_room_by_id_helper_remote(
let mut join_event = join_event_stub;
info!("Asking {remote_server} for send_join in room {room_id}");
let send_join_request = federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
omit_members: false,
pdu: services
let send_join_request = federation::membership::create_join_event::v2::Request::new(
room_id.to_owned(),
event_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(join_event.clone())
.await,
};
);
let send_join_response = match services
.sending
@@ -496,7 +484,7 @@ async fn join_room_by_id_helper_remote(
);
let (signed_event_id, signed_value) =
gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| {
gen_event_id_canonical_json(signed_raw, &room_version_rules).map_err(|e| {
err!(Request(BadJson(warn!(
"Could not convert event to canonical JSON: {e}"
))))
@@ -571,7 +559,7 @@ async fn join_room_by_id_helper_remote(
.then(|pdu| {
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
.inspect_err(|e| {
debug_warn!("Could not validate send_join response room_state event: {e:?}");
})
@@ -619,7 +607,7 @@ async fn join_room_by_id_helper_remote(
.then(|pdu| {
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
})
.ready_filter_map(Result::ok)
.ready_for_each(|(event_id, value)| {
@@ -640,7 +628,7 @@ async fn join_room_by_id_helper_remote(
};
let auth_check = state_res::event_auth::auth_check(
&state_res::RoomVersion::new(&room_version_id)?,
&room_version.rules().unwrap(),
&parsed_join_pdu,
None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()),
@@ -747,8 +735,7 @@ async fn join_room_by_id_helper_local(
// This is a restricted room, check if we can complete the join requirements
// locally.
let needs_auth_user =
user_can_perform_restricted_join(services, sender_user, room_id, &room_version)
.await;
user_can_perform_restricted_join(services, sender_user, room_id).await;
if needs_auth_user.is_ok_and(is_true!()) {
// If there was an error or the value is false, we'll try joining over
// federation. Since it's Ok(true), we can authorise this locally.
@@ -761,21 +748,19 @@ async fn join_room_by_id_helper_local(
}
}
let content = RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
reason: reason.clone(),
join_authorized_via_users_server: auth_user,
..RoomMemberEventContent::new(MembershipState::Join)
};
let mut content = RoomMemberEventContent::new(MembershipState::Join);
content.displayname = services.users.displayname(sender_user).await.ok();
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
content.blurhash = services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason);
content.join_authorized_via_users_server = auth_user;
// Try normal join first
let Err(error) = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
PartialPdu::state(sender_user.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
@@ -824,16 +809,16 @@ async fn make_join_request(
"Asking {remote_server} for make_join (attempt {make_join_counter}/{})",
servers.len()
);
let mut request = federation::membership::prepare_join_event::v1::Request::new(
room_id.to_owned(),
sender_user.to_owned(),
);
request.ver = services.server.supported_room_versions().collect();
let make_join_response = services
.sending
.send_federation_request(
remote_server,
federation::membership::prepare_join_event::v1::Request {
room_id: room_id.to_owned(),
user_id: sender_user.to_owned(),
ver: services.server.supported_room_versions().collect(),
},
)
.send_federation_request(remote_server, request)
.await;
trace!("make_join response: {:?}", make_join_response);
@@ -866,14 +851,17 @@ async fn make_join_request(
rules, but is unable to authorise a join for us. Will continue trying."
);
},
| ErrorKind::IncompatibleRoomVersion { room_version } => {
| ErrorKind::IncompatibleRoomVersion(IncompatibleRoomVersionErrorData {
room_version,
..
}) => {
warn!(
"{remote_server} reports the room we are trying to join is \
v{room_version}, which we do not support."
);
return Err(e);
},
| ErrorKind::Forbidden { .. } => {
| ErrorKind::Forbidden => {
warn!("{remote_server} refuses to let us join: {e}.");
return Err(e);
},
+20 -27
View File
@@ -1,7 +1,8 @@
use axum::extract::State;
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
use ruma::{
api::client::membership::kick_user,
assign,
events::room::member::{MembershipState, RoomMemberEventContent},
};
@@ -18,41 +19,33 @@ pub(crate) async fn kick_user_route(
if services.users.is_suspended(sender_user).await? {
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
let Ok(event) = services
if !services
.rooms
.state_accessor
.get_member(&body.room_id, &body.user_id)
.state_cache
.user_membership(&body.user_id, &body.room_id)
.await
else {
// copy synapse's behaviour of returning 200 without any change to the state
// instead of erroring on left users
return Ok(kick_user::v3::Response::new());
};
if !matches!(
event.membership,
MembershipState::Invite | MembershipState::Knock | MembershipState::Join,
) {
return Err!(Request(Forbidden(
"Cannot kick a user who is not apart of the room (current membership: {})",
event.membership
)));
.is_some_and(|membership| {
matches!(
membership,
MembershipState::Invite | MembershipState::Join | MembershipState::Knock
)
}) {
return Err!(Request(Forbidden("You cannot kick users who are not in the room.")));
}
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
membership: MembershipState::Leave,
reason: body.reason.clone(),
is_direct: None,
join_authorized_via_users_server: None,
third_party_invite: None,
..event
}),
PartialPdu::state(
body.user_id.to_string(),
&assign!(RoomMemberEventContent::new(MembershipState::Leave), {
reason: body.reason.clone(),
redact_events: body.redact_events,
}),
),
sender_user,
Some(&body.room_id),
&state_lock,
+73 -83
View File
@@ -6,7 +6,7 @@
Err, Result, debug, debug_info, debug_warn, err, info,
matrix::{
event::gen_event_id,
pdu::{PduBuilder, PduEvent},
pdu::{PartialPdu, PduEvent},
},
result::FlatOk,
trace,
@@ -15,8 +15,8 @@
};
use futures::{FutureExt, StreamExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId,
RoomVersionId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedUserId, RoomId, UserId,
api::{
client::knock::knock_room,
federation::{self},
@@ -73,7 +73,6 @@ pub(crate) async fn knock_room_route(
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
@@ -113,11 +112,7 @@ pub(crate) async fn knock_room_route(
)
.await?;
let addl_via_servers = services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned);
let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
let addl_state_servers = services
.rooms
@@ -130,7 +125,7 @@ pub(crate) async fn knock_room_route(
.iter()
.map(|event| event.get_field("sender"))
.filter_map(FlatOk::flat_ok)
.map(|user: &UserId| user.server_name().to_owned())
.map(|user: OwnedUserId| user.server_name().to_owned())
.stream()
.chain(addl_via_servers)
.collect()
@@ -188,7 +183,7 @@ async fn knock_room_by_id_helper(
.await
{
debug_warn!("{sender_user} is already knocked in {room_id}");
return Ok(knock_room::v3::Response { room_id: room_id.into() });
return Ok(knock_room::v3::Response::new(room_id.into()));
}
if let Ok(membership) = services
@@ -339,34 +334,27 @@ async fn knock_room_helper_local(
) -> Result {
debug_info!("We can knock locally");
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
let room_version = services.rooms.state.get_room_version(room_id).await?;
let room_version_rules = room_version
.rules()
.expect("room version should have defined rules");
if matches!(
room_version_id,
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
) {
if !room_version_rules.authorization.knocking {
return Err!(Request(Forbidden("This room does not support knocking.")));
}
let content = RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
reason: reason.clone(),
..RoomMemberEventContent::new(MembershipState::Knock)
};
let mut content = RoomMemberEventContent::new(MembershipState::Knock);
content.displayname = services.users.displayname(sender_user).await.ok();
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
content.blurhash = services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason.clone());
// Try normal knock first
let Err(error) = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
PartialPdu::state(sender_user.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
@@ -381,19 +369,18 @@ async fn knock_room_helper_local(
return Err(error);
}
warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock");
let (make_knock_response, remote_server) =
make_knock_request(services, sender_user, room_id, servers).await?;
info!("make_knock finished");
let room_version_id = make_knock_response.room_version;
let room_version = make_knock_response.room_version;
let room_version_rules = room_version
.rules()
.expect("room version should have defined rules");
if !services.server.supported_room_version(&room_version_id) {
return Err!(BadServerResponse(
"Remote room version {room_version_id} is not supported by conduwuit"
));
if !services.server.supported_room_version(&room_version) {
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
}
let mut knock_event_stub = serde_json::from_str::<CanonicalJsonObject>(
@@ -424,24 +411,17 @@ async fn knock_room_helper_local(
);
knock_event_stub.insert(
"content".to_owned(),
to_canonical_value(RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
reason,
..RoomMemberEventContent::new(MembershipState::Knock)
})
.expect("event is valid, we just created it"),
to_canonical_value(content).expect("event is valid, we just created it"),
);
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
services
.server_keys
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
.hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
// Generate event id
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
// Add event_id
knock_event_stub
@@ -451,14 +431,14 @@ async fn knock_room_helper_local(
let knock_event = knock_event_stub;
info!("Asking {remote_server} for send_knock in room {room_id}");
let send_knock_request = federation::knock::send_knock::v1::Request {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
pdu: services
let send_knock_request = federation::membership::create_knock_event::v1::Request::new(
room_id.to_owned(),
event_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(knock_event.clone())
.await,
};
);
services
.sending
@@ -520,12 +500,13 @@ async fn knock_room_helper_remote(
info!("make_knock finished");
let room_version_id = make_knock_response.room_version;
let room_version = make_knock_response.room_version;
let room_version_rules = room_version
.rules()
.expect("room version should have defined rules");
if !services.server.supported_room_version(&room_version_id) {
return Err!(BadServerResponse(
"Remote room version {room_version_id} is not supported by conduwuit"
));
if !services.server.supported_room_version(&room_version) {
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
}
let mut knock_event_stub: CanonicalJsonObject =
@@ -545,26 +526,26 @@ async fn knock_room_helper_remote(
.expect("Timestamp is valid js_int value"),
),
);
let mut knock_content = RoomMemberEventContent::new(MembershipState::Knock);
knock_content.displayname = services.users.displayname(sender_user).await.ok();
knock_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
knock_content.blurhash = services.users.blurhash(sender_user).await.ok();
knock_content.reason = reason;
knock_event_stub.insert(
"content".to_owned(),
to_canonical_value(RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
reason,
..RoomMemberEventContent::new(MembershipState::Knock)
})
.expect("event is valid, we just created it"),
to_canonical_value(knock_content).expect("event is valid, we just created it"),
);
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
services
.server_keys
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
.hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
// Generate event id
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
// Add event_id
knock_event_stub
@@ -574,18 +555,18 @@ async fn knock_room_helper_remote(
let knock_event = knock_event_stub;
info!("Asking {remote_server} for send_knock in room {room_id}");
let send_knock_request = federation::knock::send_knock::v1::Request {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
pdu: services
let request = federation::membership::create_knock_event::v1::Request::new(
room_id.to_owned(),
event_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(knock_event.clone())
.await,
};
);
let send_knock_response = services
.sending
.send_federation_request(&remote_server, send_knock_request)
.send_federation_request(&remote_server, request)
.await?;
info!("send_knock finished");
@@ -604,7 +585,17 @@ async fn knock_room_helper_remote(
let state = send_knock_response
.knock_room_state
.iter()
.map(|event| serde_json::from_str::<CanonicalJsonObject>(event.clone().into_json().get()))
.map(|event| {
#[allow(deprecated)]
let raw_value = match event {
| federation::membership::RawStrippedState::Stripped(raw_state) =>
&raw_state.clone().into_json(),
| federation::membership::RawStrippedState::Pdu(raw_value) => raw_value,
| _ => panic!("unknown raw stripped state type"),
};
serde_json::from_str::<CanonicalJsonObject>(raw_value.get())
})
.filter_map(Result::ok);
let mut state_map: HashMap<u64, OwnedEventId> = HashMap::new();
@@ -629,7 +620,7 @@ async fn knock_room_helper_remote(
continue;
};
let event_id = gen_event_id(&event, &room_version_id)?;
let event_id = gen_event_id(&event, &room_version_rules)?;
let shortstatekey = services
.rooms
.short
@@ -709,7 +700,7 @@ async fn make_knock_request(
sender_user: &UserId,
room_id: &RoomId,
servers: &[OwnedServerName],
) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> {
) -> Result<(federation::membership::prepare_knock_event::v1::Response, OwnedServerName)> {
let mut make_knock_response_and_server =
Err!(BadServerResponse("No server available to assist in knocking."));
@@ -722,16 +713,15 @@ async fn make_knock_request(
info!("Asking {remote_server} for make_knock ({make_knock_counter})");
let mut request = federation::membership::prepare_knock_event::v1::Request::new(
room_id.to_owned(),
sender_user.to_owned(),
);
request.ver = services.server.supported_room_versions().collect();
let make_knock_response = services
.sending
.send_federation_request(
remote_server,
federation::knock::create_knock_event_template::v1::Request {
room_id: room_id.to_owned(),
user_id: sender_user.to_owned(),
ver: services.server.supported_room_versions().collect(),
},
)
.send_federation_request(remote_server, request)
.await;
trace!("make_knock response: {make_knock_response:?}");
+27 -34
View File
@@ -3,13 +3,13 @@
use axum::extract::State;
use conduwuit::{
Err, Pdu, Result, debug_info, debug_warn, err,
matrix::{event::gen_event_id, pdu::PduBuilder},
matrix::{event::gen_event_id, pdu::PartialPdu},
utils::{self, FutureBoolExt, future::ReadyEqExt},
warn,
};
use futures::{FutureExt, StreamExt, pin_mut};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, UserId,
api::{
client::membership::leave_room,
federation::{self},
@@ -42,11 +42,7 @@ pub(crate) async fn leave_room_route(
// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms,
// and ignores errors
pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
let rooms_joined = services
.rooms
.state_cache
.rooms_joined(user_id)
.map(ToOwned::to_owned);
let rooms_joined = services.rooms.state_cache.rooms_joined(user_id);
let rooms_invited = services
.rooms
@@ -142,18 +138,17 @@ pub async fn leave_room(
.await;
match user_member_event_content {
| Ok(content) => {
| Ok(mut content) => {
content.membership = MembershipState::Leave;
content.reason = reason;
content.join_authorized_via_users_server = None;
content.is_direct = None;
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
membership: MembershipState::Leave,
reason,
join_authorized_via_users_server: None,
is_direct: None,
..content
}),
PartialPdu::state(user_id.to_string(), &content),
user_id,
Some(room_id),
&state_lock,
@@ -226,7 +221,6 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
.rooms
.state_cache
.servers_invite_via(room_id)
.map(ToOwned::to_owned)
.collect::<HashSet<OwnedServerName>>()
.await,
);
@@ -260,7 +254,7 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.filter_map(|sender| {
if !services.globals.user_is_local(sender) {
if !services.globals.user_is_local(&sender) {
Some(sender.server_name().to_owned())
} else {
None
@@ -289,10 +283,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
.sending
.send_federation_request(
remote_server.as_ref(),
federation::membership::prepare_leave_event::v1::Request {
room_id: room_id.to_owned(),
user_id: user_id.to_owned(),
},
federation::membership::prepare_leave_event::v1::Request::new(
room_id.to_owned(),
user_id.to_owned(),
),
)
.await;
@@ -329,6 +323,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
)));
}
let room_version_rules = room_version_id
.rules()
.expect("room version should have defined rules");
let mut leave_event_stub = serde_json::from_str::<CanonicalJsonObject>(
make_leave_response.event.get(),
)
@@ -366,21 +364,16 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
}
// room v3 and above removed the "event_id" field from remote PDU format
match room_version_id {
| RoomVersionId::V1 | RoomVersionId::V2 => {},
| _ => {
leave_event_stub.remove("event_id");
},
}
leave_event_stub.remove("event_id");
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
services
.server_keys
.hash_and_sign_event(&mut leave_event_stub, &room_version_id)?;
.hash_and_sign_event(&mut leave_event_stub, &room_version_rules)?;
// Generate event id
let event_id = gen_event_id(&leave_event_stub, &room_version_id)?;
let event_id = gen_event_id(&leave_event_stub, &room_version_rules)?;
// Add event_id back
leave_event_stub
@@ -393,14 +386,14 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
.sending
.send_federation_request(
&remote_server,
federation::membership::create_leave_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
pdu: services
federation::membership::create_leave_event::v2::Request::new(
room_id.to_owned(),
event_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(leave_event.clone())
.await,
},
),
)
.await?;
+48 -72
View File
@@ -9,7 +9,7 @@
use futures::{FutureExt, StreamExt, future::join};
use ruma::{
api::client::membership::{
get_member_events::{self, v3::MembershipEventFilter},
get_member_events::{self},
joined_members::{self, v3::RoomMember},
},
events::{
@@ -43,20 +43,20 @@ pub(crate) async fn get_member_events_route(
return Err!(Request(Forbidden("You don't have permission to view this room.")));
}
Ok(get_member_events::v3::Response {
chunk: services
.rooms
.state_accessor
.room_state_full(&body.room_id)
.ready_filter_map(Result::ok)
.ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember)
.map(at!(1))
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership))
.map(Event::into_format)
.collect()
.boxed()
.await,
})
let chunk = services
.rooms
.state_accessor
.room_state_full(&body.room_id)
.ready_filter_map(Result::ok)
.ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember)
.map(at!(1))
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership))
.map(Event::into_format)
.collect()
.boxed()
.await;
Ok(get_member_events::v3::Response::new(chunk))
}
/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members`
@@ -78,70 +78,46 @@ pub(crate) async fn joined_members_route(
return Err!(Request(Forbidden("You don't have permission to view this room.")));
}
Ok(joined_members::v3::Response {
joined: services
.rooms
.state_cache
.room_members(&body.room_id)
.map(ToOwned::to_owned)
.broad_then(|user_id| async move {
let (display_name, avatar_url) = join(
services.users.displayname(&user_id).ok(),
services.users.avatar_url(&user_id).ok(),
)
.await;
let joined = services
.rooms
.state_cache
.room_members(&body.room_id)
.broad_then(|user_id| async move {
let mut member = RoomMember::new();
let (display_name, avatar_url) = join(
services.users.displayname(&user_id).ok(),
services.users.avatar_url(&user_id).ok(),
)
.await;
member.display_name = display_name;
member.avatar_url = avatar_url;
(user_id, RoomMember { display_name, avatar_url })
})
.collect()
.await,
})
(user_id, member)
})
.collect()
.await;
Ok(joined_members::v3::Response::new(joined))
}
fn membership_filter<Pdu: Event>(
pdu: Pdu,
for_membership: Option<&MembershipEventFilter>,
not_membership: Option<&MembershipEventFilter>,
membership_state_filter: Option<&MembershipState>,
not_membership_state_filter: Option<&MembershipState>,
) -> Option<impl Event> {
let membership_state_filter = match for_membership {
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
| Some(MembershipEventFilter::Leave) => MembershipState::Leave,
| Some(_) | None => MembershipState::Join,
};
let not_membership_state_filter = match not_membership {
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
| Some(MembershipEventFilter::Join) => MembershipState::Join,
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
| Some(_) | None => MembershipState::Leave,
};
let evt_membership = pdu.get_content::<RoomMemberEventContent>().ok()?.membership;
if for_membership.is_some() && not_membership.is_some() {
if membership_state_filter != evt_membership
|| not_membership_state_filter == evt_membership
{
None
} else {
Some(pdu)
}
} else if for_membership.is_some() && not_membership.is_none() {
if membership_state_filter != evt_membership {
None
} else {
Some(pdu)
}
} else if not_membership.is_some() && for_membership.is_none() {
if not_membership_state_filter == evt_membership {
None
} else {
Some(pdu)
}
} else {
Some(pdu)
if let Some(membership_state_filter) = membership_state_filter
&& *membership_state_filter != evt_membership
{
return None;
}
if let Some(not_membership_state_filter) = not_membership_state_filter
&& *not_membership_state_filter == evt_membership
{
return None;
}
Some(pdu)
}
+8 -9
View File
@@ -47,15 +47,14 @@ pub(crate) async fn joined_rooms_route(
State(services): State<crate::State>,
body: Ruma<joined_rooms::v3::Request>,
) -> Result<joined_rooms::v3::Response> {
Ok(joined_rooms::v3::Response {
joined_rooms: services
.rooms
.state_cache
.rooms_joined(body.sender_user())
.map(ToOwned::to_owned)
.collect()
.await,
})
let joined_rooms = services
.rooms
.state_cache
.rooms_joined(body.sender_user())
.collect()
.await;
Ok(joined_rooms::v3::Response::new(joined_rooms))
}
/// Checks if the room is banned in any way possible and the sender user is not
+10 -11
View File
@@ -1,5 +1,5 @@
use axum::extract::State;
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
use ruma::{
api::client::membership::unban_user,
events::room::member::{MembershipState, RoomMemberEventContent},
@@ -18,9 +18,9 @@ pub(crate) async fn unban_user_route(
if services.users.is_suspended(sender_user).await? {
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
let current_member_content = services
let mut current_member_content = services
.rooms
.state_accessor
.get_member(&body.room_id, &body.user_id)
@@ -34,18 +34,17 @@ pub(crate) async fn unban_user_route(
)));
}
current_member_content.membership = MembershipState::Leave;
current_member_content.reason.clone_from(&body.reason);
current_member_content.join_authorized_via_users_server = None;
current_member_content.third_party_invite = None;
current_member_content.is_direct = None;
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
membership: MembershipState::Leave,
reason: body.reason.clone(),
join_authorized_via_users_server: None,
third_party_invite: None,
is_direct: None,
..current_member_content
}),
PartialPdu::state(body.user_id.to_string(), &current_member_content),
sender_user,
Some(&body.room_id),
&state_lock,
+13 -10
View File
@@ -26,15 +26,17 @@
DeviceId, RoomId, UserId,
api::{
Direction,
client::{error::ErrorKind, filter::RoomEventFilter, message::get_message_events},
client::{filter::RoomEventFilter, message::get_message_events},
error::{ErrorKind, SenderIgnoredErrorData},
},
assign,
events::{
AnyStateEvent, StateEventType,
TimelineEventType::{self, *},
invite_permission_config::FilterLevel,
},
serde::Raw,
};
use ruminuwuity::invite_permission_config::FilterLevel;
use tracing::warn;
use crate::Ruma;
@@ -74,7 +76,6 @@ pub(crate) async fn get_message_events_route(
ClientIp(client_ip): ClientIp,
body: Ruma<get_message_events::v3::Request>,
) -> Result<get_message_events::v3::Response> {
debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
let sender_user = body.sender_user();
let sender_device = body.sender_device.as_deref();
let room_id = &body.room_id;
@@ -199,12 +200,12 @@ pub(crate) async fn get_message_events_route(
.map(Event::into_format)
.collect();
Ok(get_message_events::v3::Response {
Ok(assign!(get_message_events::v3::Response::new(), {
start: from.to_string(),
end: next_token.as_ref().map(PduCount::to_string),
chunk,
state,
})
chunk: chunk,
state: state,
}))
}
pub(crate) async fn lazy_loading_witness<'a, I>(
@@ -301,7 +302,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
{
// exclude Synapse's dummy events from bloating up response bodies. clients
// don't need to see this.
if event.kind().to_cow_str() == "org.matrix.dummy_event" {
if event.kind().to_string() == "org.matrix.dummy_event" {
return Ok(true);
}
@@ -323,7 +324,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
if server_ignored {
// the sender's server is ignored, so ignore this event
return Err(Error::BadRequest(
ErrorKind::SenderIgnored { sender: None },
ErrorKind::SenderIgnored(SenderIgnoredErrorData::new()),
"The sender's server is ignored by this server.",
));
}
@@ -332,7 +333,9 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
// the recipient of this PDU has the sender ignored, and we're not
// configured to send ignored messages to clients
return Err(Error::BadRequest(
ErrorKind::SenderIgnored { sender: Some(event.sender().to_owned()) },
ErrorKind::SenderIgnored(SenderIgnoredErrorData::with_sender(
event.sender().to_owned(),
)),
"You have ignored this sender.",
));
}
+2 -3
View File
@@ -15,6 +15,7 @@
pub(super) mod media_legacy;
pub(super) mod membership;
pub(super) mod message;
pub(super) mod mutual_rooms;
pub(super) mod openid;
pub(super) mod presence;
pub(super) mod profile;
@@ -35,7 +36,6 @@
pub(super) mod threads;
pub(super) mod to_device;
pub(super) mod typing;
pub(super) mod unstable;
pub(super) mod unversioned;
pub(super) mod user_directory;
pub(super) mod voip;
@@ -60,10 +60,10 @@
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
pub(super) use message::*;
pub(super) use mutual_rooms::*;
pub(super) use openid::*;
pub(super) use presence::*;
pub(super) use profile::*;
pub use profile::{update_all_rooms, update_avatar_url, update_displayname};
pub use push::recreate_push_rules_and_return;
pub(super) use push::*;
pub(super) use read_marker::*;
@@ -82,7 +82,6 @@
pub(super) use threads::*;
pub(super) use to_device::*;
pub(super) use typing::*;
pub(super) use unstable::*;
pub(super) use unversioned::*;
pub(super) use user_directory::*;
pub(super) use voip::*;
+36
View File
@@ -0,0 +1,36 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::api::client::membership::mutual_rooms;
use crate::Ruma;
/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`
///
/// Gets all the rooms the sender shares with the specified user.
///
/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
#[tracing::instrument(skip_all, name = "mutual_rooms", level = "info")]
pub(crate) async fn get_mutual_rooms_route(
State(services): State<crate::State>,
body: Ruma<mutual_rooms::unstable::Request>,
) -> Result<mutual_rooms::unstable::Response> {
let sender_user = body.sender_user();
if sender_user == body.user_id {
return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
}
if !services.users.exists(&body.user_id).await {
return Ok(mutual_rooms::unstable::Response::new(vec![]));
}
let mutual_rooms = services
.rooms
.state_cache
.get_shared_rooms(sender_user, &body.user_id)
.collect()
.await;
Ok(mutual_rooms::unstable::Response::new(mutual_rooms))
}
+5 -5
View File
@@ -29,10 +29,10 @@ pub(crate) async fn create_openid_token_route(
.users
.create_openid_token(&body.user_id, &access_token)?;
Ok(account::request_openid_token::v3::Response {
Ok(account::request_openid_token::v3::Response::new(
access_token,
token_type: TokenType::Bearer,
matrix_server_name: services.server.name.clone(),
expires_in: Duration::from_secs(expires_in),
})
TokenType::Bearer,
services.server.name.clone(),
Duration::from_secs(expires_in),
))
}
+7 -6
View File
@@ -2,7 +2,10 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use ruma::api::client::presence::{get_presence, set_presence};
use ruma::{
api::client::presence::{get_presence, set_presence},
assign,
};
use crate::Ruma;
@@ -26,7 +29,7 @@ pub(crate) async fn set_presence_route(
.set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone())
.await?;
Ok(set_presence::v3::Response {})
Ok(set_presence::v3::Response::new())
}
/// # `GET /_matrix/client/r0/presence/{userId}/status`
@@ -76,13 +79,11 @@ pub(crate) async fn get_presence_route(
.map(|millis| Duration::from_millis(millis.into())),
};
Ok(get_presence::v3::Response {
// TODO: Should ruma just use the presenceeventcontent type here?
Ok(assign!(get_presence::v3::Response::new(presence.content.presence), {
status_msg,
currently_active: presence.content.currently_active,
last_active_ago,
presence: presence.content.presence,
})
}))
},
| _ => Err!(Request(NotFound("Presence state for this user was not found"))),
}
+335 -379
View File
@@ -1,229 +1,26 @@
use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{
Err, Result,
matrix::pdu::PduBuilder,
utils::{IterStream, future::TryExtExt, stream::TryIgnore},
warn,
};
use conduwuit::{Err, Result, matrix::pdu::PartialPdu, utils::to_canonical_object};
use conduwuit_service::Services;
use futures::{
FutureExt, StreamExt, TryStreamExt,
future::{join, join3, join4},
};
use futures::StreamExt;
use ruma::{
OwnedMxcUri, OwnedRoomId, UserId,
UserId,
api::{
client::profile::{
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
delete_profile_field, get_profile, get_profile_field, set_profile_field,
},
federation,
},
assign,
events::room::member::{MembershipState, RoomMemberEventContent},
presence::PresenceState,
profile::{ProfileFieldName, ProfileFieldValue},
};
use serde_json::{Value, to_value};
use crate::Ruma;
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
///
/// Updates the displayname.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn set_displayname_route(
State(services): State<crate::State>,
body: Ruma<set_display_name::v3::Request>,
) -> Result<set_display_name::v3::Response> {
let sender_user = body.sender_user();
if services.users.is_suspended(sender_user).await? {
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
if *sender_user != body.user_id && body.appservice_info.is_none() {
return Err!(Request(Forbidden("You cannot update the profile of another user")));
}
let all_joined_rooms: Vec<OwnedRoomId> = services
.rooms
.state_cache
.rooms_joined(&body.user_id)
.map(ToOwned::to_owned)
.collect()
.await;
update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms)
.boxed()
.await;
if services.config.allow_local_presence {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(set_display_name::v3::Response {})
}
/// # `GET /_matrix/client/v3/profile/{userId}/displayname`
///
/// Returns the displayname of the user.
///
/// - If user is on another server and we do not have a local copy already fetch
/// displayname over federation
pub(crate) async fn get_displayname_route(
State(services): State<crate::State>,
body: Ruma<get_display_name::v3::Request>,
) -> Result<get_display_name::v3::Response> {
if !services.globals.user_is_local(&body.user_id) {
// Create and update our local copy of the user
if let Ok(response) = services
.sending
.send_federation_request(
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: body.user_id.clone(),
field: None, // we want the full user's profile to update locally too
},
)
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None, None).await?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone());
return Ok(get_display_name::v3::Response { displayname: response.displayname });
}
}
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err!(Request(NotFound("Profile was not found.")));
}
Ok(get_display_name::v3::Response {
displayname: services.users.displayname(&body.user_id).await.ok(),
})
}
/// # `PUT /_matrix/client/v3/profile/{userId}/avatar_url`
///
/// Updates the `avatar_url` and `blurhash`.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn set_avatar_url_route(
State(services): State<crate::State>,
body: Ruma<set_avatar_url::v3::Request>,
) -> Result<set_avatar_url::v3::Response> {
let sender_user = body.sender_user();
if services.users.is_suspended(sender_user).await? {
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
if *sender_user != body.user_id && body.appservice_info.is_none() {
return Err!(Request(Forbidden("You cannot update the profile of another user")));
}
let all_joined_rooms: Vec<OwnedRoomId> = services
.rooms
.state_cache
.rooms_joined(&body.user_id)
.map(ToOwned::to_owned)
.collect()
.await;
update_avatar_url(
&services,
&body.user_id,
body.avatar_url.clone(),
body.blurhash.clone(),
&all_joined_rooms,
)
.boxed()
.await;
if services.config.allow_local_presence {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)
.await
.ok();
}
Ok(set_avatar_url::v3::Response {})
}
/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url`
///
/// Returns the `avatar_url` and `blurhash` of the user.
///
/// - If user is on another server and we do not have a local copy already fetch
/// `avatar_url` and blurhash over federation
pub(crate) async fn get_avatar_url_route(
State(services): State<crate::State>,
body: Ruma<get_avatar_url::v3::Request>,
) -> Result<get_avatar_url::v3::Response> {
if !services.globals.user_is_local(&body.user_id) {
// Create and update our local copy of the user
if let Ok(response) = services
.sending
.send_federation_request(
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: body.user_id.clone(),
field: None, // we want the full user's profile to update locally as well
},
)
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None, None).await?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone());
return Ok(get_avatar_url::v3::Response {
avatar_url: response.avatar_url,
blurhash: response.blurhash,
});
}
}
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err!(Request(NotFound("Profile was not found.")));
}
let (avatar_url, blurhash) = join(
services.users.avatar_url(&body.user_id).ok(),
services.users.blurhash(&body.user_id).ok(),
)
.await;
Ok(get_avatar_url::v3::Response { avatar_url, blurhash })
}
/// # `GET /_matrix/client/v3/profile/{userId}`
///
/// Returns the displayname, avatar_url, blurhash, and custom profile fields of
@@ -235,188 +32,347 @@ pub(crate) async fn get_profile_route(
State(services): State<crate::State>,
body: Ruma<get_profile::v3::Request>,
) -> Result<get_profile::v3::Response> {
let Some(profile) = fetch_full_profile(&services, &body.user_id).await else {
return Err!(Request(NotFound("This user's profile could not be fetched.")));
};
Ok(get_profile::v3::Response::from_iter(profile))
}
pub(crate) async fn get_profile_field_route(
State(services): State<crate::State>,
body: Ruma<get_profile_field::v3::Request>,
) -> Result<get_profile_field::v3::Response> {
let value = fetch_profile_field(&services, &body.user_id, body.field.clone()).await?;
Ok(assign!(get_profile_field::v3::Response::default(), { value }))
}
pub(crate) async fn set_profile_field_route(
State(services): State<crate::State>,
body: Ruma<set_profile_field::v3::Request>,
) -> Result<set_profile_field::v3::Response> {
if body.user_id != body.sender_user()
&& !(body.appservice_info.is_some()
|| services.admin.user_is_admin(body.sender_user()).await)
{
return Err!(Request(Forbidden("You may not change other users' profile data.")));
}
if !services.globals.user_is_local(&body.user_id) {
// Create and update our local copy of the user
if let Ok(response) = services
.sending
.send_federation_request(
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: body.user_id.clone(),
field: None,
},
)
return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
}
set_profile_field(&services, &body.user_id, ProfileFieldChange::Set(body.value.clone()))
.await?;
Ok(set_profile_field::v3::Response::new())
}
pub(crate) async fn delete_profile_field_route(
State(services): State<crate::State>,
body: Ruma<delete_profile_field::v3::Request>,
) -> Result<delete_profile_field::v3::Response> {
if body.user_id != body.sender_user()
&& !(body.appservice_info.is_some()
|| services.admin.user_is_admin(body.sender_user()).await)
{
return Err!(Request(Forbidden("You may not change other users' profile data.")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
}
set_profile_field(&services, &body.user_id, ProfileFieldChange::Delete(body.field.clone()))
.await?;
Ok(delete_profile_field::v3::Response::new())
}
/// Fetches a user's entire profile as a field-name → value map.
///
/// Local users are read straight from the database. For remote users the
/// profile is requested over federation and the local cache is refreshed.
/// Returns `None` when the remote homeserver could not be reached.
async fn fetch_full_profile(
services: &Services,
user_id: &UserId,
) -> Option<BTreeMap<String, Value>> {
// If the user exists locally, fetch their local profile
if services.users.exists(user_id).await {
return Some(get_local_profile(services, user_id).await);
}
// Otherwise ask their homeserver
let Ok(response) = services
.sending
.send_federation_request(
user_id.server_name(),
federation::query::get_profile_information::v1::Request::new(user_id.to_owned()),
)
.await
else {
return None;
};
// Update our local copies of their profile fields
// Clear stale cached fields first so deleted remote fields don't linger.
services.users.clear_profile(user_id).await;
for (field, value) in response.iter() {
let Ok(value) = ProfileFieldValue::new(field, value.to_owned()) else {
// Skip malformed fields
continue;
};
// Best-effort cache write; errors are deliberately ignored.
let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value)).await;
}
Some(BTreeMap::from_iter(response))
}
/// Fetches a single profile field for a user.
///
/// Local users are read from the database. For remote users the field is
/// requested over federation; on success the local cache is updated, and when
/// the remote side no longer has the field our cached copy is deleted.
///
/// # Errors
/// `NotFound` when the remote homeserver is unreachable; `Unknown` when it
/// returns a value that fails `ProfileFieldValue` validation.
async fn fetch_profile_field(
services: &Services,
user_id: &UserId,
field: ProfileFieldName,
) -> Result<Option<ProfileFieldValue>> {
// If the user exists locally, fetch their local profile field
if services.globals.user_is_local(user_id) {
return Ok(get_local_profile_field(services, user_id, field).await);
}
// Otherwise ask their homeserver
// Request only the one field we need rather than the whole profile.
let Ok(response) = services
.sending
.send_federation_request(
user_id.server_name(),
assign!(federation::query::get_profile_information::v1::Request::new(user_id.to_owned()), {
field: Some(field.clone())
}),
)
.await
else {
return Err!(Request(NotFound(
"User's homeserver could not provide this profile field."
)));
};
if let Some(value) = response.get(field.as_str()).map(ToOwned::to_owned) {
if let Ok(value) = ProfileFieldValue::new(field.as_str(), value) {
// Refresh our cached copy; best-effort, errors ignored.
let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value.clone()))
.await;
Ok(Some(value))
} else {
Err!(Request(Unknown(
"User's homeserver returned malformed data for this profile field."
)))
}
} else {
// Field absent remotely: drop any stale cached value locally too.
let _ = set_profile_field(services, user_id, ProfileFieldChange::Delete(field)).await;
Ok(None)
}
}
/// Builds the complete profile map for a local user from the database,
/// combining the dedicated displayname/avatar_url columns with all custom
/// profile keys.
pub(crate) async fn get_local_profile(
services: &Services,
user_id: &UserId,
) -> BTreeMap<String, Value> {
let mut profile = BTreeMap::new();
// Get displayname and avatar_url independently because `all_profile_keys`
// doesn't include them
for field in [ProfileFieldName::AvatarUrl, ProfileFieldName::DisplayName] {
let key = field.as_str().to_owned();
if let Some(value) = get_local_profile_field(services, user_id, field).await {
profile.insert(key, value.value().into_owned());
}
}
// Insert all other profile fields
let mut all_fields = services.users.all_profile_keys(user_id);
while let Some((key, value)) = all_fields.next().await {
profile.insert(key, value);
}
profile
}
/// Reads a single profile field for a local user from the database.
///
/// Displayname and avatar URL live in dedicated columns; every other field is
/// looked up via the generic `profile_key` store. Returns `None` when the
/// field is unset.
///
/// NOTE(review): this function is garbled in this view — three spans below
/// reference `body` and `response`, which are not in scope here, and break the
/// method-chain / brace structure. They appear to be hunks from a route
/// handler spliced into this helper and must be removed/restored against the
/// original revision. The markers below flag each spliced span.
pub(crate) async fn get_local_profile_field(
services: &Services,
user_id: &UserId,
field: ProfileFieldName,
) -> Option<ProfileFieldValue> {
let value = match field.clone() {
| ProfileFieldName::AvatarUrl => services
.users
.avatar_url(user_id)
.await
// NOTE(review): spliced span — a block using `body` interrupts the
// `.await.ok().map(...)` chain; not valid here.
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None, None).await?;
}
.ok()
.map(to_value)
.transpose()
.expect("converting avatar url to value should succeed"),
| ProfileFieldName::DisplayName => services
.users
.displayname(user_id)
.await
.ok()
.map(to_value)
.transpose()
.expect("converting displayname to value should succeed"),
// Any other field name goes through the generic profile-key store.
| other => services
.users
.profile_key(user_id, other.as_str())
.await
.ok(),
}?;
// NOTE(review): spliced span — these `services.users.set_*(&body.user_id, ...)`
// calls belong to a federation-refresh handler, not this read-only helper.
services
.users
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone());
Some(
ProfileFieldValue::new(field.as_str(), value)
.expect("local profile field should be valid"),
)
}
// NOTE(review): dangling spliced loop — `response` / `body` undefined at this
// scope and the call is unclosed; from the same foreign hunk as above.
for (profile_key, profile_key_value) in &response.custom_profile_fields {
services.users.set_profile_key(
&body.user_id,
profile_key,
Some(profile_key_value.clone()),
/// A pending modification to one profile field: either writing a new value or
/// removing the field entirely.
enum ProfileFieldChange {
	Set(ProfileFieldValue),
	Delete(ProfileFieldName),
}

impl ProfileFieldChange {
	/// The name of the field this change targets, regardless of variant.
	fn field_name(&self) -> ProfileFieldName {
		match self {
			| Self::Set(value) => value.field_name(),
			| Self::Delete(name) => name.clone(),
		}
	}

	/// The new value being written, or `None` for a deletion.
	fn value(&self) -> Option<Value> {
		match self {
			| Self::Set(value) => Some(value.value().into_owned()),
			| Self::Delete(_) => None,
		}
	}
}
/// Applies a profile field change (set or delete) for a local user, enforcing
/// the spec's size limits: 255 bytes per key and 64 KiB for the whole profile
/// serialized as canonical JSON.
///
/// NOTE(review): this function is garbled in this view — several spans below
/// reference `body` / `response` (not in scope) and `return` a
/// `get_profile::v3::Response` from a function declared `-> Result<()>`. They
/// look like hunks of a GET-profile route handler spliced in and must be
/// reconciled against the original revision. Markers flag each spliced span.
async fn set_profile_field(
services: &Services,
user_id: &UserId,
change: ProfileFieldChange,
) -> Result<()> {
const MAX_KEY_LENGTH_BYTES: usize = 255;
const MAX_PROFILE_LENGTH_BYTES: usize = 65536;
let field_name = change.field_name();
// TODO: The spec mentions special error codes (M_PROFILE_TOO_LARGE,
// M_KEY_TOO_LARGE) for profile field size limits, but they're not in its list
// of error codes and Ruma doesn't have them. Should we return those, or is
// M_TOO_LARGE okay?
if field_name.as_str().len() > MAX_KEY_LENGTH_BYTES {
return Err!(Request(TooLarge(
"Individual profile keys must not exceed {MAX_KEY_LENGTH_BYTES} bytes in length."
)));
}
// Serialize the entire profile as canonical JSON, including the new change,
// to check if it exceeds 64 KiB
{
let mut full_profile = get_local_profile(services, user_id).await;
match &change {
| ProfileFieldChange::Set(value) => {
full_profile.insert(
value.field_name().as_str().to_owned(),
value.value().clone().into_owned(),
);
},
| ProfileFieldChange::Delete(key) => {
full_profile.remove(key.as_str());
},
}
if let Ok(canonical_profile) = to_canonical_object(full_profile) {
if serde_json::to_string(&canonical_profile)
.expect("should be able to serialize to string")
.len() > MAX_PROFILE_LENGTH_BYTES
{
return Err!(
"Profile data must not exceed {MAX_PROFILE_LENGTH_BYTES} bytes in length."
);
}
// NOTE(review): spliced span — returns a GET-profile response (and uses an
// undefined `response`) from a `Result<()>` function; foreign hunk.
return Ok(get_profile::v3::Response {
displayname: response.displayname,
avatar_url: response.avatar_url,
blurhash: response.blurhash,
custom_profile_fields: response.custom_profile_fields,
});
} else {
return Err!(Request(BadJson("Failed to canonicalize profile.")));
}
}
// NOTE(review): spliced span — `body` is not in scope and the `if` block is
// never closed before the `match change` below; foreign hunk.
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err!(Request(NotFound("Profile was not found.")));
// Route the change to the appropriate store: dedicated displayname /
// avatar_url / blurhash columns, or the generic profile-key table.
match change {
| ProfileFieldChange::Set(ProfileFieldValue::DisplayName(displayname)) => {
services
.users
.set_displayname(user_id, Some(displayname).filter(|dn| !dn.is_empty()));
},
| ProfileFieldChange::Set(ProfileFieldValue::AvatarUrl(avatar_url)) => {
services
.users
.set_avatar_url(user_id, Some(avatar_url).filter(|av| av.is_valid()));
},
| ProfileFieldChange::Delete(ProfileFieldName::DisplayName) => {
services.users.set_displayname(user_id, None);
},
| ProfileFieldChange::Delete(ProfileFieldName::AvatarUrl) => {
services.users.set_avatar_url(user_id, None);
},
| other =>
if other.field_name().as_str() == "blurhash" {
if let Some(Value::String(blurhash)) = other.value() {
services.users.set_blurhash(user_id, Some(blurhash));
} else {
services.users.set_blurhash(user_id, None);
}
} else {
services.users.set_profile_key(
user_id,
other.field_name().as_str(),
other.value(),
);
},
}
// NOTE(review): spliced span — `join4` over `body.user_id` belongs to the
// GET-profile handler, not this setter; foreign hunk.
let (avatar_url, blurhash, displayname, custom_profile_fields) = join4(
services.users.avatar_url(&body.user_id).ok(),
services.users.blurhash(&body.user_id).ok(),
services.users.displayname(&body.user_id).ok(),
services.users.all_profile_keys(&body.user_id).collect(),
)
.await;
// If the user is local and changed their displayname or avatar_url, update it
// in all their joined rooms
if matches!(field_name, ProfileFieldName::AvatarUrl | ProfileFieldName::DisplayName)
&& services.users.is_active_local(user_id).await
{
let displayname = services.users.displayname(user_id).await.ok();
let avatar_url = services.users.avatar_url(user_id).await.ok();
let membership_content = assign!(
RoomMemberEventContent::new(MembershipState::Join), { displayname, avatar_url }
);
// NOTE(review): spliced span — another GET-profile response return; the
// room-membership broadcast this branch should perform is missing here.
Ok(get_profile::v3::Response {
avatar_url,
blurhash,
displayname,
custom_profile_fields,
})
}
// NOTE(review): dangling statement outside any function body in this view.
let mut all_joined_rooms = services.rooms.state_cache.rooms_joined(user_id);
/// Updates a local user's displayname and broadcasts a fresh join membership
/// event into every room in `all_joined_rooms`. No-op when the displayname is
/// unchanged.
///
/// NOTE(review): this function is a garbled merge of two revisions in this
/// view: a `while let ... .next().await` loop (stream API) is applied to the
/// `&[OwnedRoomId]` slice parameter, `membership_content` is used without
/// being defined, the "unchanged" early-return sits inside the loop after
/// taking a state lock, and `all_joined_rooms` is then re-consumed by the
/// `.iter().try_stream()` pipeline below. Must be reconciled against the
/// original revision; markers flag the suspect spans.
pub async fn update_displayname(
services: &Services,
user_id: &UserId,
displayname: Option<String>,
all_joined_rooms: &[OwnedRoomId],
) {
// Snapshot current profile values so the new membership event can carry the
// unchanged avatar/blurhash alongside the new displayname.
let (current_avatar_url, current_blurhash, current_displayname) = join3(
services.users.avatar_url(user_id).ok(),
services.users.blurhash(user_id).ok(),
services.users.displayname(user_id).ok(),
)
.await;
// NOTE(review): spliced loop — `.next().await` is a stream API, but
// `all_joined_rooms` is a slice here; `membership_content` is undefined.
while let Some(room_id) = all_joined_rooms.next().await {
let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
if displayname == current_displayname {
return;
}
let _ = services
.rooms
.timeline
.build_and_append_pdu(
PartialPdu::state(user_id.to_string(), &membership_content),
user_id,
Some(&room_id),
&state_lock,
)
.await;
}
services.users.set_displayname(user_id, displayname.clone());
// Send a new join membership event into all joined rooms
let avatar_url = &current_avatar_url;
let blurhash = &current_blurhash;
let displayname = &displayname;
// Build one (pdu, room) pair per joined room, then append them all.
let all_joined_rooms: Vec<_> = all_joined_rooms
.iter()
.try_stream()
.and_then(|room_id: &OwnedRoomId| async move {
let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
displayname: displayname.clone(),
membership: MembershipState::Join,
avatar_url: avatar_url.clone(),
blurhash: blurhash.clone(),
join_authorized_via_users_server: None,
reason: None,
is_direct: None,
third_party_invite: None,
redact_events: None,
});
Ok((pdu, room_id))
})
.ignore_err()
.collect()
.await;
update_all_rooms(services, all_joined_rooms, user_id)
.boxed()
.await;
}
/// Updates a local user's avatar URL and blurhash, then broadcasts a fresh
/// join membership event (carrying the unchanged displayname) into every room
/// in `all_joined_rooms`. No-op when both values are unchanged.
pub async fn update_avatar_url(
services: &Services,
user_id: &UserId,
avatar_url: Option<OwnedMxcUri>,
blurhash: Option<String>,
all_joined_rooms: &[OwnedRoomId],
) {
// Snapshot current values to detect no-ops and to reuse the displayname.
let (current_avatar_url, current_blurhash, current_displayname) = join3(
services.users.avatar_url(user_id).ok(),
services.users.blurhash(user_id).ok(),
services.users.displayname(user_id).ok(),
)
.await;
// Nothing changed: skip the DB writes and the room-wide broadcast.
if current_avatar_url == avatar_url && current_blurhash == blurhash {
return;
}
services.users.set_avatar_url(user_id, avatar_url.clone());
services.users.set_blurhash(user_id, blurhash.clone());
// Send a new join membership event into all joined rooms
// Borrow the new/current values so the async closure below can capture them.
let avatar_url = &avatar_url;
let blurhash = &blurhash;
let displayname = &current_displayname;
// Build one (pdu, room) pair per joined room; errors are dropped per-room.
let all_joined_rooms: Vec<_> = all_joined_rooms
.iter()
.try_stream()
.and_then(|room_id: &OwnedRoomId| async move {
let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
avatar_url: avatar_url.clone(),
blurhash: blurhash.clone(),
membership: MembershipState::Join,
displayname: displayname.clone(),
join_authorized_via_users_server: None,
reason: None,
is_direct: None,
third_party_invite: None,
redact_events: None,
});
Ok((pdu, room_id))
})
.ignore_err()
.collect()
.await;
update_all_rooms(services, all_joined_rooms, user_id)
.boxed()
.await;
}
/// Appends each prepared membership PDU into its room, taking the per-room
/// state mutex for each append. Failures are logged and skipped so one bad
/// room doesn't block the rest.
///
/// NOTE(review): garbled in this view — the function is declared to return
/// `()` yet ends with `Ok(())`, one closing brace is missing, and the
/// presence-ping block sits inside the error branch (upstream it runs after
/// the loop, unconditionally). Reconcile against the original revision.
pub async fn update_all_rooms(
services: &Services,
all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>,
user_id: &UserId,
) {
for (pdu_builder, room_id) in all_joined_rooms {
// Serialize state changes per room.
let state_lock = services.rooms.state.mutex.lock(room_id).await;
if let Err(e) = services
.rooms
.timeline
.build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
.await
{
warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");
if services.config.allow_local_presence {
// Send a presence EDU to indicate the profile changed
let _ = services
.presence
.ping_presence(user_id, &PresenceState::Online)
.await;
}
}
Ok(())
}
+36 -42
View File
@@ -3,13 +3,13 @@
use conduwuit_service::Services;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue,
api::client::{
error::ErrorKind,
push::{
api::{
client::push::{
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions,
get_pushrule_enabled, get_pushrules_all, get_pushrules_global_scope, set_pusher,
set_pushrule, set_pushrule_actions, set_pushrule_enabled,
},
error::ErrorKind,
},
events::{
GlobalAccountDataEventType,
@@ -80,9 +80,7 @@ pub(crate) async fn get_pushrules_all_route(
global_ruleset.update_with_server_default(Ruleset::server_default(sender_user));
let ty = GlobalAccountDataEventType::PushRules;
let event = PushRulesEvent {
content: PushRulesEventContent { global: global_ruleset.clone() },
};
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
services
.account_data
@@ -91,7 +89,7 @@ pub(crate) async fn get_pushrules_all_route(
}
};
Ok(get_pushrules_all::v3::Response { global: global_ruleset })
Ok(get_pushrules_all::v3::Response::new(global_ruleset))
}
/// # `GET /_matrix/client/r0/pushrules/global/`
@@ -116,21 +114,20 @@ pub(crate) async fn get_pushrules_global_route(
// user somehow has non-existent push rule event. recreate it and return server
// default silently
let ty = GlobalAccountDataEventType::PushRules;
let event = PushRulesEvent {
content: PushRulesEventContent {
global: Ruleset::server_default(sender_user),
},
};
let global_ruleset = Ruleset::server_default(sender_user);
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
services
.account_data
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(event)?,
)
.await?;
return Ok(get_pushrules_global_scope::v3::Response {
global: Ruleset::server_default(sender_user),
});
return Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset));
};
let account_data_content =
@@ -173,16 +170,16 @@ pub(crate) async fn get_pushrules_global_route(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(PushRulesEvent {
content: PushRulesEventContent { global: global_ruleset.clone() },
})
&serde_json::to_value(PushRulesEvent::new(PushRulesEventContent::new(
global_ruleset.clone(),
)))
.expect("to json always works"),
)
.await?;
}
};
Ok(get_pushrules_global_scope::v3::Response { global: global_ruleset })
Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset))
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
@@ -216,7 +213,7 @@ pub(crate) async fn get_pushrule_route(
.map(Into::into);
if let Some(rule) = rule {
Ok(get_pushrule::v3::Response { rule })
Ok(get_pushrule::v3::Response::new(rule))
} else {
Err!(Request(NotFound("Push rule not found.")))
}
@@ -275,7 +272,7 @@ pub(crate) async fn set_pushrule_route(
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
.await?;
Ok(set_pushrule::v3::Response {})
Ok(set_pushrule::v3::Response::new())
}
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
@@ -309,7 +306,7 @@ pub(crate) async fn get_pushrule_actions_route(
.map(|rule| rule.actions().to_owned())
.ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;
Ok(get_pushrule_actions::v3::Response { actions })
Ok(get_pushrule_actions::v3::Response::new(actions))
}
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
@@ -342,7 +339,7 @@ pub(crate) async fn set_pushrule_actions_route(
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
.await?;
Ok(set_pushrule_actions::v3::Response {})
Ok(set_pushrule_actions::v3::Response::new())
}
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
@@ -360,7 +357,7 @@ pub(crate) async fn get_pushrule_enabled_route(
|| body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str()
|| body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str()
{
return Ok(get_pushrule_enabled::v3::Response { enabled: false });
return Ok(get_pushrule_enabled::v3::Response::new(false));
}
let event: PushRulesEvent = services
@@ -376,7 +373,7 @@ pub(crate) async fn get_pushrule_enabled_route(
.map(ruma::push::AnyPushRuleRef::enabled)
.ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;
Ok(get_pushrule_enabled::v3::Response { enabled })
Ok(get_pushrule_enabled::v3::Response::new(enabled))
}
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
@@ -409,7 +406,7 @@ pub(crate) async fn set_pushrule_enabled_route(
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
.await?;
Ok(set_pushrule_enabled::v3::Response {})
Ok(set_pushrule_enabled::v3::Response::new())
}
/// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
@@ -451,7 +448,7 @@ pub(crate) async fn delete_pushrule_route(
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
.await?;
Ok(delete_pushrule::v3::Response {})
Ok(delete_pushrule::v3::Response::new())
}
/// # `GET /_matrix/client/r0/pushers`
@@ -463,9 +460,7 @@ pub(crate) async fn get_pushers_route(
) -> Result<get_pushers::v3::Response> {
let sender_user = body.sender_user();
Ok(get_pushers::v3::Response {
pushers: services.pusher.get_pushers(sender_user).await,
})
Ok(get_pushers::v3::Response::new(services.pusher.get_pushers(sender_user).await))
}
/// # `POST /_matrix/client/r0/pushers/set`
@@ -493,19 +488,18 @@ pub async fn recreate_push_rules_and_return(
services: &Services,
sender_user: &ruma::UserId,
) -> Result<get_pushrules_all::v3::Response> {
let ty = GlobalAccountDataEventType::PushRules;
let event = PushRulesEvent {
content: PushRulesEventContent {
global: Ruleset::server_default(sender_user),
},
};
let global_ruleset = Ruleset::server_default(sender_user);
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
services
.account_data
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(event)?,
)
.await?;
Ok(get_pushrules_all::v3::Response {
global: Ruleset::server_default(sender_user),
})
Ok(get_pushrules_all::v3::Response::new(global_ruleset))
}
+21 -31
View File
@@ -8,7 +8,8 @@
api::client::{read_marker::set_read_marker, receipt::create_receipt},
events::{
RoomAccountDataEventType,
receipt::{ReceiptThread, ReceiptType},
fully_read::{FullyReadEvent, FullyReadEventContent},
receipt::{Receipt, ReceiptEvent, ReceiptEventContent, ReceiptType},
},
};
@@ -28,9 +29,7 @@ pub(crate) async fn set_read_marker_route(
let sender_user = body.sender_user();
if let Some(event) = &body.fully_read {
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
content: ruma::events::fully_read::FullyReadEventContent { event_id: event.clone() },
};
let fully_read_event = FullyReadEvent::new(FullyReadEventContent::new(event.to_owned()));
services
.account_data
@@ -62,19 +61,16 @@ pub(crate) async fn set_read_marker_route(
if services.config.allow_local_read_receipts
&& !services.users.is_suspended(sender_user).await?
{
let receipt_content = BTreeMap::from_iter([(
let receipt_content = [(
event.to_owned(),
BTreeMap::from_iter([(
ReceiptType::Read,
BTreeMap::from_iter([(
sender_user.to_owned(),
ruma::events::receipt::Receipt {
ts: Some(MilliSecondsSinceUnixEpoch::now()),
thread: ReceiptThread::Unthreaded,
},
Receipt::new(MilliSecondsSinceUnixEpoch::now()),
)]),
)]),
)]);
)];
services
.rooms
@@ -82,10 +78,10 @@ pub(crate) async fn set_read_marker_route(
.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
&ReceiptEvent::new(
body.room_id.clone(),
ReceiptEventContent::from_iter(receipt_content),
),
)
.await;
}
@@ -111,7 +107,7 @@ pub(crate) async fn set_read_marker_route(
.private_read_set(&body.room_id, sender_user, count);
}
Ok(set_read_marker::v3::Response {})
Ok(set_read_marker::v3::Response::new())
}
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
@@ -148,11 +144,8 @@ pub(crate) async fn create_receipt_route(
match body.receipt_type {
| create_receipt::v3::ReceiptType::FullyRead => {
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
content: ruma::events::fully_read::FullyReadEventContent {
event_id: body.event_id.clone(),
},
};
let fully_read_event =
FullyReadEvent::new(FullyReadEventContent::new(body.event_id.clone()));
services
.account_data
.update(
@@ -164,19 +157,16 @@ pub(crate) async fn create_receipt_route(
.await?;
},
| create_receipt::v3::ReceiptType::Read => {
let receipt_content = BTreeMap::from_iter([(
let receipt_content = [(
body.event_id.clone(),
BTreeMap::from_iter([(
ReceiptType::Read,
BTreeMap::from_iter([(
sender_user.to_owned(),
ruma::events::receipt::Receipt {
ts: Some(MilliSecondsSinceUnixEpoch::now()),
thread: ReceiptThread::Unthreaded,
},
Receipt::new(MilliSecondsSinceUnixEpoch::now()),
)]),
)]),
)]);
)];
services
.rooms
@@ -184,10 +174,10 @@ pub(crate) async fn create_receipt_route(
.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
&ReceiptEvent::new(
body.room_id.clone(),
ReceiptEventContent::from_iter(receipt_content),
),
)
.await;
},
@@ -218,5 +208,5 @@ pub(crate) async fn create_receipt_route(
},
}
Ok(create_receipt::v3::Response {})
Ok(create_receipt::v3::Response::new())
}
+10 -9
View File
@@ -1,8 +1,8 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
use ruma::{
api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
api::client::redact::redact_event, assign, events::room::redaction::RoomRedactionEventContent,
};
use crate::Ruma;
@@ -28,18 +28,19 @@ pub(crate) async fn redact_event_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
let event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
redacts: Some(body.event_id.clone()),
..PduBuilder::timeline(&RoomRedactionEventContent {
redacts: Some(body.event_id.clone()),
reason: body.reason.clone(),
})
..PartialPdu::timeline(
&assign!(RoomRedactionEventContent::new_v11(body.event_id.clone()), {
reason: body.reason.clone()
}),
)
},
sender_user,
Some(&body.room_id),
@@ -49,5 +50,5 @@ pub(crate) async fn redact_event_route(
drop(state_lock);
Ok(redact_event::v3::Response { event_id })
Ok(redact_event::v3::Response::new(event_id))
}
+15 -13
View File
@@ -15,6 +15,7 @@
get_relating_events_with_rel_type_and_event_type,
},
},
assign,
events::{TimelineEventType, relation::RelationType},
};
@@ -39,11 +40,12 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
body.dir,
)
.await
.map(|res| get_relating_events_with_rel_type_and_event_type::v1::Response {
chunk: res.chunk,
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
.map(|res| {
assign!(get_relating_events_with_rel_type_and_event_type::v1::Response::new(res.chunk), {
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
})
})
}
@@ -66,11 +68,12 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
body.dir,
)
.await
.map(|res| get_relating_events_with_rel_type::v1::Response {
chunk: res.chunk,
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
.map(|res| {
assign!(get_relating_events_with_rel_type::v1::Response::new(res.chunk), {
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
})
})
}
@@ -201,12 +204,11 @@ async fn paginate_relations_with_filter(
.map(Event::into_format)
.collect();
Ok(get_relating_events::v1::Response {
Ok(assign!(get_relating_events::v1::Response::new(chunk), {
next_batch,
prev_batch: from.map(Into::into),
recursion_depth: recurse.then_some(depth.into()),
chunk,
})
}))
}
async fn visibility_filter<Pdu: Event + Send + Sync>(
+33 -39
View File
@@ -7,7 +7,7 @@
use ruma::{
EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
api::client::{
report_user,
reporting::report_user,
room::{report_content, report_room},
},
events::{Mentions, room::message::RoomMessageEventContent},
@@ -22,9 +22,11 @@ struct Report {
event_id: Option<OwnedEventId>,
user_id: Option<OwnedUserId>,
report_type: String,
reason: Option<String>,
reason: String,
}
const MAX_REASON_LENGTH: usize = 2000;
/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
///
/// Reports an abusive room to homeserver admins
@@ -39,10 +41,10 @@ pub(crate) async fn report_room_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
return Err!(Request(
InvalidParam("Reason too long, should be 750 characters or fewer",)
));
if body.reason.len() > MAX_REASON_LENGTH {
return Err!(Request(InvalidParam(
"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
)));
}
delay_response().await;
@@ -52,8 +54,7 @@ pub(crate) async fn report_room_route(
// their discretion.
info!(
"Received room report by user {sender_user} for room {} with reason: \"{}\"",
body.room_id,
body.reason.as_deref().unwrap_or("")
body.room_id, body.reason
);
if !services
@@ -78,7 +79,7 @@ pub(crate) async fn report_room_route(
services.admin.send_message(build_report(report)).await.ok();
Ok(report_room::v3::Response {})
Ok(report_room::v3::Response::new())
}
/// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`
@@ -98,26 +99,22 @@ pub(crate) async fn report_event_route(
delay_response().await;
let reason = body
.reason
.clone()
.unwrap_or_else(|| "<no reason provided>".to_owned());
// check if we know about the reported event ID or if it's invalid
let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else {
return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid")));
};
is_event_report_valid(
&services,
&pdu.event_id,
&body.room_id,
sender_user,
body.reason.as_ref(),
&pdu,
)
.await?;
is_event_report_valid(&services, &pdu.event_id, &body.room_id, sender_user, &reason, &pdu)
.await?;
info!(
"Received event report by user {sender_user} for room {} and event ID {}, with reason: \
\"{}\"",
body.room_id,
body.event_id,
body.reason.as_deref().unwrap_or("")
body.room_id, body.event_id, reason
);
let report = Report {
sender: sender_user.to_owned(),
@@ -125,11 +122,11 @@ pub(crate) async fn report_event_route(
event_id: Some(body.event_id.clone()),
user_id: None,
report_type: "event".to_owned(),
reason: body.reason.clone(),
reason,
};
services.admin.send_message(build_report(report)).await.ok();
Ok(report_content::v3::Response {})
Ok(report_content::v3::Response::new())
}
#[tracing::instrument(skip_all, fields(%client), name = "report_user", level = "info")]
@@ -144,17 +141,17 @@ pub(crate) async fn report_user_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
return Err!(Request(
InvalidParam("Reason too long, should be 750 characters or fewer",)
));
if body.reason.len() > MAX_REASON_LENGTH {
return Err!(Request(InvalidParam(
"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
)));
}
delay_response().await;
if !services.users.is_active_local(&body.user_id).await {
// return 200 as to not reveal if the user exists. Recommended by spec.
return Ok(report_user::v3::Response {});
return Ok(report_user::v3::Response::new());
}
let report = Report {
@@ -168,13 +165,12 @@ pub(crate) async fn report_user_route(
info!(
"Received room report from {sender_user} for user {} with reason: \"{}\"",
body.user_id,
body.reason.as_deref().unwrap_or("")
body.user_id, body.reason
);
services.admin.send_message(build_report(report)).await.ok();
Ok(report_user::v3::Response {})
Ok(report_user::v3::Response::new())
}
/// in the following order:
@@ -188,7 +184,7 @@ async fn is_event_report_valid(
event_id: &EventId,
room_id: &RoomId,
sender_user: &UserId,
reason: Option<&String>,
reason: &str,
pdu: &PduEvent,
) -> Result<()> {
debug_info!(
@@ -200,10 +196,10 @@ async fn is_event_report_valid(
return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
}
if reason.as_ref().is_some_and(|s| s.len() > 750) {
return Err!(Request(
InvalidParam("Reason too long, should be 750 characters or fewer",)
));
if reason.len() > MAX_REASON_LENGTH {
return Err!(Request(InvalidParam(
"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
)));
}
if !services
@@ -232,9 +228,7 @@ fn build_report(report: Report) -> RoomMessageEventContent {
if report.event_id.is_some() {
let _ = writeln!(text, "- Reported Event ID: `{}`", report.event_id.unwrap());
}
if let Some(reason) = report.reason {
let _ = writeln!(text, "- Report Reason: {reason}");
}
let _ = writeln!(text, "- Report Reason: {}", report.reason);
RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention())
}
+8 -9
View File
@@ -26,13 +26,12 @@ pub(crate) async fn get_room_aliases_route(
return Err!(Request(Forbidden("You don't have permission to view this room.",)));
}
Ok(aliases::v3::Response {
aliases: services
.rooms
.alias
.local_aliases_for_room(&body.room_id)
.map(ToOwned::to_owned)
.collect()
.await,
})
let aliases = services
.rooms
.alias
.local_aliases_for_room(&body.room_id)
.collect()
.await;
Ok(aliases::v3::Response::new(aliases))
}
+90 -126
View File
@@ -2,18 +2,19 @@
use axum::extract::State;
use conduwuit::{
Err, Result, RoomVersion, debug, debug_info, debug_warn, err, info,
matrix::{StateKey, pdu::PduBuilder},
Err, Result, debug, debug_info, err, info,
matrix::{StateKey, pdu::PartialPdu},
trace, warn,
};
use conduwuit_service::{Services, appservice::RegistrationInfo};
use futures::FutureExt;
use ruma::{
CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,
CanonicalJsonObject, CanonicalJsonValue, Int, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId,
OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomVersionId, UserId,
api::client::room::{self, create_room},
assign,
events::{
TimelineEventType,
invite_permission_config::FilterLevel,
room::{
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
@@ -27,8 +28,10 @@
},
},
int,
room_version_rules::{AuthorizationRules, RoomIdFormatVersion},
serde::{JsonObject, Raw},
};
use ruminuwuity::invite_permission_config::FilterLevel;
use serde_json::{json, value::to_raw_value};
use crate::{Ruma, client::invite_helper};
@@ -81,15 +84,23 @@ pub(crate) async fn create_room_route(
},
| None => services.server.config.default_room_version.clone(),
};
let room_features = RoomVersion::new(&room_version)?;
let room_version_rules = room_version.rules().unwrap();
let room_id: Option<OwnedRoomId> = if !room_features.room_ids_as_hashes {
match &body.room_id {
| Some(custom_room_id) => Some(custom_room_id_check(&services, custom_room_id)?),
| None => Some(RoomId::new(services.globals.server_name())),
}
} else {
None
let room_id: Option<OwnedRoomId> = match room_version_rules.room_id_format {
| RoomIdFormatVersion::V1 => {
// Check for custom room ID field
if let Some(CanonicalJsonValue::String(room_id)) =
body.json_body.as_ref().unwrap().get("room_id")
{
Some(
RoomId::parse(room_id)
.map_err(|_| err!(Request(BadJson("Malformed custom room ID"))))?,
)
} else {
Some(RoomId::new_v1(services.globals.server_name()))
}
},
| _ => None,
};
// check if room ID doesn't already exist instead of erroring on auth check
@@ -167,7 +178,7 @@ pub(crate) async fn create_room_route(
use RoomVersionId::*;
let mut content = content
.deserialize_as::<CanonicalJsonObject>()
.deserialize_as_unchecked::<CanonicalJsonObject>()
.map_err(|e| {
err!(Request(BadJson(error!(
"Failed to deserialise content as canonical JSON: {e}"
@@ -201,8 +212,7 @@ pub(crate) async fn create_room_route(
let content = match room_version {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
RoomCreateEventContent::new_v1(sender_user.to_owned()),
| V11 => RoomCreateEventContent::new_v11(),
| _ => RoomCreateEventContent::new_v12(),
| _ => RoomCreateEventContent::new_v11(),
};
let mut content =
serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())?;
@@ -218,27 +228,40 @@ pub(crate) async fn create_room_route(
.short
.get_or_create_shortroomid(&room_id)
.await;
services.rooms.state.mutex.lock(&room_id).await
services.rooms.state.mutex.lock(room_id.as_str()).await
},
| None => {
let temp_room_id = RoomId::new(services.globals.server_name());
let temp_room_id = RoomId::new_v1(services.globals.server_name());
trace!("Locking temporary room state mutex for {temp_room_id}");
services.rooms.state.mutex.lock(&temp_room_id).await
services.rooms.state.mutex.lock(temp_room_id.as_str()).await
},
};
// 1. The room create event
debug!("Creating room create event for {sender_user} in room {room_id:?}");
let tmp_id = room_id.as_deref();
// Allow requesters to override the `origin_server_ts` to customize room ids
// from v12 onwards
let custom_origin_server_ts = body
.json_body
.as_ref()
.unwrap()
.get("origin_server_ts")
.and_then(CanonicalJsonValue::as_integer)
.map(Into::into)
.and_then(|value: i64| value.try_into().ok())
.map(MilliSecondsSinceUnixEpoch);
let create_event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&create_content)?,
state_key: Some(StateKey::new()),
timestamp: body.origin_server_ts,
timestamp: custom_origin_server_ts,
..Default::default()
},
sender_user,
@@ -253,34 +276,27 @@ pub(crate) async fn create_room_route(
| None => {
let as_room_id = create_event_id.as_str().replace('$', "!");
trace!("Creating room with v12 room ID {as_room_id}");
RoomId::parse(&as_room_id)?.to_owned()
RoomId::parse(&as_room_id)?.clone()
},
};
drop(state_lock);
if let Some(expected_room_id) = body.room_id.as_ref() {
if expected_room_id.as_str() != room_id.as_str() {
return Err!(Request(InvalidParam(
"Custom room ID {expected_room_id} does not match the generated room ID \
{room_id}.",
)));
}
}
debug!("Room created with ID {room_id}");
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
// 2. Let the room creator join
let mut join_event = RoomMemberEventContent::new(MembershipState::Join);
join_event.displayname = services.users.displayname(sender_user).await.ok();
join_event.avatar_url = services.users.avatar_url(sender_user).await.ok();
join_event.blurhash = services.users.blurhash(sender_user).await.ok();
join_event.is_direct = Some(body.is_direct);
debug_info!("Joining {sender_user} to room {room_id}");
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
is_direct: Some(body.is_direct),
..RoomMemberEventContent::new(MembershipState::Join)
}),
PartialPdu::state(sender_user.to_string(), &join_event),
sender_user,
Some(&room_id),
&state_lock,
@@ -306,7 +322,10 @@ pub(crate) async fn create_room_route(
let mut creators: Vec<OwnedUserId> = vec![sender_user.to_owned()];
// Do we care about additional_creators?
if room_features.explicitly_privilege_room_creators {
if room_version_rules
.authorization
.explicitly_privilege_room_creators
{
// Have they been specified?
if let Some(additional_creators) = create_content.get("additional_creators") {
// Are they a real array?
@@ -316,9 +335,9 @@ pub(crate) async fn create_room_route(
// Are they a string?
if let Some(creator) = creator.as_str() {
// Do they parse into a real user ID?
if let Ok(creator) = OwnedUserId::parse(creator) {
if let Ok(creator) = UserId::parse(creator) {
// Add them to the power levels and creators
creators.push(creator.clone());
creators.push(creator);
}
}
}
@@ -331,17 +350,20 @@ pub(crate) async fn create_room_route(
}
let power_levels_content = default_power_levels_content(
body.power_level_content_override.as_ref(),
body.power_level_content_override
.as_ref()
.map(Raw::cast_ref),
&body.visibility,
power_levels_to_grant,
creators,
&room_version_rules.authorization,
)?;
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content)?,
state_key: Some(StateKey::new()),
@@ -360,10 +382,13 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
}),
PartialPdu::state(
String::new(),
&assign!(RoomCanonicalAliasEventContent::new(), {
alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
}),
),
sender_user,
Some(&room_id),
&state_lock,
@@ -379,7 +404,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(
PartialPdu::state(
String::new(),
&RoomJoinRulesEventContent::new(match preset {
| RoomPreset::PublicChat => JoinRule::Public,
@@ -399,7 +424,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(
PartialPdu::state(
String::new(),
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
),
@@ -415,7 +440,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(
PartialPdu::state(
String::new(),
&RoomGuestAccessEventContent::new(match preset {
| RoomPreset::PublicChat => GuestAccess::Forbidden,
@@ -431,26 +456,19 @@ pub(crate) async fn create_room_route(
// 6. Events listed in initial_state
for event in &body.initial_state {
let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
err!(Request(InvalidParam(warn!("Invalid initial state event: {e:?}"))))
})?;
let mut partial_pdu = event
.deserialize_as_unchecked::<PartialPdu>()
.map_err(|e| {
err!(Request(InvalidParam(warn!("Invalid initial state event: {e:?}"))))
})?;
debug_info!("Room creation initial state event: {event:?}");
// client/appservice workaround: if a user sends an initial_state event with a
// state event in there with the content of literally `{}` (not null or empty
// string), let's just skip it over and warn.
if pdu_builder.content.get().eq("{}") {
debug_warn!("skipping empty initial state event with content of `{{}}`: {event:?}");
debug_warn!("content: {}", pdu_builder.content.get());
continue;
}
// Implicit state key defaults to ""
pdu_builder.state_key.get_or_insert_with(StateKey::new);
partial_pdu.state_key.get_or_insert_with(StateKey::new);
// Silently skip encryption events if they are not allowed
if pdu_builder.event_type == TimelineEventType::RoomEncryption
if partial_pdu.event_type == TimelineEventType::RoomEncryption
&& !services.config.allow_encryption
{
continue;
@@ -459,7 +477,7 @@ pub(crate) async fn create_room_route(
services
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, Some(&room_id), &state_lock)
.build_and_append_pdu(partial_pdu, sender_user, Some(&room_id), &state_lock)
.boxed()
.await?;
}
@@ -470,7 +488,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
PartialPdu::state(String::new(), &RoomNameEventContent::new(name.clone())),
sender_user,
Some(&room_id),
&state_lock,
@@ -484,7 +502,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }),
PartialPdu::state(String::new(), &RoomTopicEventContent::new(topic.clone())),
sender_user,
Some(&room_id),
&state_lock,
@@ -539,10 +557,13 @@ fn default_power_levels_content(
visibility: &room::Visibility,
users: BTreeMap<OwnedUserId, Int>,
creators: Vec<OwnedUserId>,
authorization_rules: &AuthorizationRules,
) -> Result<serde_json::Value> {
let mut power_levels_content =
serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() })
.expect("event is valid, we just created it");
serde_json::to_value(assign!(RoomPowerLevelsEventContent::new(authorization_rules), {
users
}))
.unwrap();
// secure proper defaults of sensitive/dangerous permissions that moderators
// (power level 50) should not have easy access to
@@ -632,7 +653,7 @@ async fn room_alias_check(
}
let server_name = services.globals.server_name();
let full_room_alias = OwnedRoomAliasId::parse(format!("#{room_alias_name}:{server_name}"))
let full_room_alias = RoomAliasId::parse(format!("#{room_alias_name}:{server_name}"))
.map_err(|e| {
err!(Request(InvalidParam(debug_error!(
?e,
@@ -667,60 +688,3 @@ async fn room_alias_check(
Ok(full_room_alias)
}
/// if a room is being created with a custom room ID, run our checks against it
fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<OwnedRoomId> {
// apply forbidden room alias checks to custom room IDs too
if services
.globals
.forbidden_alias_names()
.is_match(custom_room_id)
{
return Err!(Request(Unknown("Custom room ID is forbidden.")));
}
if custom_room_id.contains(':') {
return Err!(Request(InvalidParam(
"Custom room ID contained `:` which is not allowed. Please note that this expects a \
localpart, not the full room ID.",
)));
} else if custom_room_id.contains(char::is_whitespace) {
return Err!(Request(InvalidParam(
"Custom room ID contained spaces which is not valid."
)));
}
let server_name = services.globals.server_name();
let mut room_id = custom_room_id.to_owned();
if custom_room_id.contains(':') {
if !custom_room_id.starts_with('!') {
return Err!(Request(InvalidParam(
"Custom room ID contains an unexpected `:` which is not allowed.",
)));
}
} else if custom_room_id.starts_with('!') {
return Err!(Request(InvalidParam(
"Room ID is prefixed with !, but is not fully qualified. You likely did not want \
this.",
)));
} else {
room_id = format!("!{custom_room_id}:{server_name}");
}
OwnedRoomId::parse(room_id)
.map_err(Into::into)
.and_then(|full_room_id| {
if full_room_id
.server_name()
.expect("failed to extract server name from room ID")
!= server_name
{
Err!(Request(InvalidParam("Custom room ID must be on this server.",)))
} else {
Ok(full_room_id)
}
})
.inspect(|full_room_id| {
debug_info!(%full_room_id, "Full custom room ID");
})
.inspect_err(|e| warn!(?e, %custom_room_id, "Failed to create room with custom room ID",))
}
+1 -1
View File
@@ -44,5 +44,5 @@ pub(crate) async fn get_room_event_route(
event.set_unsigned(body.sender_user.as_deref());
Ok(get_room_event::v3::Response { event: event.into_format() })
Ok(get_room_event::v3::Response::new(event.into_format()))
}
+21 -20
View File
@@ -4,7 +4,10 @@
utils::{BoolExt, stream::TryTools},
};
use futures::{FutureExt, TryStreamExt, future::try_join4};
use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response};
use ruma::{
api::client::peeking::get_current_state::v3::{PaginationChunk, Request, Response},
assign,
};
use crate::Ruma;
@@ -69,29 +72,27 @@ pub(crate) async fn room_initial_sync_route(
.boxed()
.await?;
let messages = PaginationChunk {
start: events.last().map(at!(0)).as_ref().map(ToString::to_string),
let end = events
.first()
.map(at!(0))
.as_ref()
.map(ToString::to_string)
.unwrap_or_default();
let start = events.last().map(at!(0)).as_ref().map(ToString::to_string);
end: events
.first()
.map(at!(0))
.as_ref()
.map(ToString::to_string)
.unwrap_or_default(),
let chunk = events
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect();
chunk: events
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect(),
};
let messages = assign!(PaginationChunk::new(chunk, end), { start });
Ok(Response {
room_id: room_id.to_owned(),
account_data: None,
state: state.into(),
Ok(assign!(Response::new(room_id.to_owned()), {
account_data: vec![],
state: state,
messages: messages.chunk.is_empty().or_some(messages),
visibility: visibility.into(),
membership,
})
}))
}
+2 -5
View File
@@ -6,10 +6,7 @@
mod upgrade;
pub(crate) use self::{
aliases::get_room_aliases_route,
create::create_room_route,
event::get_room_event_route,
initial_sync::room_initial_sync_route,
summary::{get_room_summary, get_room_summary_legacy},
aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route,
initial_sync::room_initial_sync_route, summary::get_room_summary,
upgrade::upgrade_room_route,
};
+15 -318
View File
@@ -1,48 +1,11 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, debug, debug_warn, info, trace,
utils::{IterStream, future::TryExtExt},
};
use futures::{
FutureExt, StreamExt, TryFutureExt,
future::{OptionFuture, join3},
stream::FuturesUnordered,
};
use ruma::{
OwnedServerName, RoomId, UserId,
api::{
client::room::get_summary,
federation::space::{SpaceHierarchyParentSummary, get_hierarchy},
},
events::room::member::MembershipState,
space::SpaceRoomJoinRule::{self, *},
};
use service::Services;
use conduwuit::{Err, Result};
use ruma::api::client::room::get_summary;
use service::rooms::summary::Accessibility;
use crate::{Ruma, RumaResponse};
use crate::Ruma;
/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary`
///
/// Returns a short description of the state of a room.
///
/// This is the "wrong" endpoint that some implementations/clients may use
/// according to the MSC. Request and response bodies are the same as
/// `get_room_summary`.
///
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
pub(crate) async fn get_room_summary_legacy(
State(services): State<crate::State>,
ClientIp(client): ClientIp,
body: Ruma<get_summary::msc3266::Request>,
) -> Result<RumaResponse<get_summary::msc3266::Response>> {
get_room_summary(State(services), ClientIp(client), body)
.boxed()
.await
.map(RumaResponse)
}
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
/// # `GET /_matrix/client/v1/room_summary/{roomIdOrAlias}`
///
/// Returns a short description of the state of a room.
@@ -50,8 +13,8 @@ pub(crate) async fn get_room_summary_legacy(
pub(crate) async fn get_room_summary(
State(services): State<crate::State>,
ClientIp(client): ClientIp,
body: Ruma<get_summary::msc3266::Request>,
) -> Result<get_summary::msc3266::Response> {
body: Ruma<get_summary::v1::Request>,
) -> Result<get_summary::v1::Response> {
let (room_id, servers) = services
.rooms
.alias
@@ -62,285 +25,19 @@ pub(crate) async fn get_room_summary(
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
}
room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref())
.boxed()
.await
}
async fn room_summary_response(
services: &Services,
room_id: &RoomId,
servers: &[OwnedServerName],
sender_user: Option<&UserId>,
) -> Result<get_summary::msc3266::Response> {
if services
let summary = services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room_id)
.await
{
match local_room_summary_response(services, room_id, sender_user)
.boxed()
.await
{
| Ok(response) => return Ok(response),
| Err(e) => {
debug_warn!("Failed to get local room summary: {e:?}, falling back to remote");
},
}
}
let room =
remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?;
Ok(get_summary::msc3266::Response {
room_id: room_id.to_owned(),
canonical_alias: room.canonical_alias,
avatar_url: room.avatar_url,
guest_can_join: room.guest_can_join,
name: room.name,
num_joined_members: room.num_joined_members,
topic: room.topic,
world_readable: room.world_readable,
join_rule: room.join_rule,
room_type: room.room_type,
room_version: room.room_version,
encryption: room.encryption,
allowed_room_ids: room.allowed_room_ids,
membership: sender_user.is_some().then_some(MembershipState::Leave),
})
}
async fn local_room_summary_response(
services: &Services,
room_id: &RoomId,
sender_user: Option<&UserId>,
) -> Result<get_summary::msc3266::Response> {
trace!(
sender_user = sender_user.map(tracing::field::display),
"Sending local room summary response for {room_id:?}"
);
let (join_rule, world_readable, guest_can_join) = join3(
services.rooms.state_accessor.get_join_rules(room_id),
services.rooms.state_accessor.is_world_readable(room_id),
services.rooms.state_accessor.guest_can_join(room_id),
)
.await;
// Synapse allows server admins to bypass visibility checks.
// That seems neat so we'll copy that behaviour.
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
user_can_see_summary(
services,
room_id,
&join_rule.clone().into(),
guest_can_join,
world_readable,
join_rule.allowed_rooms(),
sender_user,
)
.summary
.get_room_summary_for_user(body.sender_user.as_deref(), &room_id, &servers)
.await?;
}
let canonical_alias = services
.rooms
.state_accessor
.get_canonical_alias(room_id)
.ok();
let name = services.rooms.state_accessor.get_name(room_id).ok();
let topic = services.rooms.state_accessor.get_room_topic(room_id).ok();
let room_type = services.rooms.state_accessor.get_room_type(room_id).ok();
let avatar_url = services
.rooms
.state_accessor
.get_avatar(room_id)
.map(|res| res.into_option().unwrap_or_default().url);
let room_version = services.rooms.state.get_room_version(room_id).ok();
let encryption = services
.rooms
.state_accessor
.get_room_encryption(room_id)
.ok();
let num_joined_members = services
.rooms
.state_cache
.room_joined_count(room_id)
.unwrap_or(0);
let membership: OptionFuture<_> = sender_user
.map(|sender_user| {
services
.rooms
.state_accessor
.get_member(room_id, sender_user)
.map_ok_or(MembershipState::Leave, |content| content.membership)
})
.into();
let (
canonical_alias,
name,
num_joined_members,
topic,
avatar_url,
room_type,
room_version,
encryption,
membership,
) = futures::join!(
canonical_alias,
name,
num_joined_members,
topic,
avatar_url,
room_type,
room_version,
encryption,
membership,
);
Ok(get_summary::msc3266::Response {
room_id: room_id.to_owned(),
canonical_alias,
avatar_url,
guest_can_join,
name,
num_joined_members: num_joined_members.try_into().unwrap_or_default(),
topic,
world_readable,
room_type,
room_version,
encryption,
membership,
allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(),
join_rule: join_rule.into(),
})
}
/// used by MSC3266 to fetch a room's info if we do not know about it
async fn remote_room_summary_hierarchy_response(
services: &Services,
room_id: &RoomId,
servers: &[OwnedServerName],
sender_user: Option<&UserId>,
) -> Result<SpaceHierarchyParentSummary> {
trace!(sender_user = ?sender_user.map(tracing::field::display), ?servers, "Sending remote room summary response for {room_id:?}");
if !services.config.allow_federation {
return Err!(Request(Forbidden("Federation is disabled.")));
}
if services.rooms.metadata.is_disabled(room_id).await {
return Err!(Request(Forbidden(
"Federaton of room {room_id} is currently disabled on this server."
)));
}
if servers.is_empty() {
return Err!(Request(MissingParam(
"No servers were provided to fetch the room over federation"
)));
}
let request = get_hierarchy::v1::Request::new(room_id.to_owned());
let mut requests: FuturesUnordered<_> = servers
.iter()
.map(|server| {
info!("Fetching room summary for {room_id} from server {server}");
services
.sending
.send_federation_request(server, request.clone())
.inspect_ok(move |v| {
debug!("Fetched room summary for {room_id} from server {server}: {v:?}");
})
.inspect_err(move |e| {
info!("Failed to fetch room summary for {room_id} from server {server}: {e}");
})
})
.collect();
while let Some(Ok(response)) = requests.next().await {
trace!("{response:?}");
let room = response.room.clone();
if room.room_id != room_id {
debug_warn!(
"Room ID {} returned does not belong to the requested room ID {}",
room.room_id,
room_id
);
continue;
}
if sender_user.is_none() || !services.users.is_admin(sender_user.unwrap()).await {
return user_can_see_summary(
services,
room_id,
&room.join_rule,
room.guest_can_join,
room.world_readable,
room.allowed_room_ids.iter().map(AsRef::as_ref),
sender_user,
)
.await
.map(|()| room);
}
return Ok(room);
}
Err!(Request(NotFound("Room not found or is not accessible")))
}
async fn user_can_see_summary<'a, I>(
services: &Services,
room_id: &RoomId,
join_rule: &SpaceRoomJoinRule,
guest_can_join: bool,
world_readable: bool,
allowed_room_ids: I,
sender_user: Option<&UserId>,
) -> Result
where
I: Iterator<Item = &'a RoomId> + Send,
{
let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted);
match sender_user {
| Some(sender_user) => {
let user_can_see_state_events = services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, room_id);
let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false);
let user_in_allowed_restricted_room = allowed_room_ids
.stream()
.any(|room| services.rooms.state_cache.is_joined(sender_user, room));
let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) =
join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room)
.boxed()
.await;
if user_can_see_state_events
|| (is_guest && guest_can_join)
|| is_public_room
|| user_in_allowed_restricted_room
{
return Ok(());
}
Err!(Request(Forbidden("Room is not accessible")))
match summary {
| Accessibility::Accessible(summary) => Ok(get_summary::v1::Response::new(summary)),
| Accessibility::Inaccessible => {
Err!(Request(Forbidden("You may not preview this room."), FORBIDDEN))
},
| None => {
if is_public_room || world_readable {
return Ok(());
}
Err!(Request(Forbidden("Room is not accessible")))
| Accessibility::NotFound => {
Err!(Request(Forbidden("This room does not exist."), FORBIDDEN))
},
}
}
+106 -91
View File
@@ -2,16 +2,18 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Event, Result, RoomVersion, debug, err, info,
matrix::{StateKey, pdu::PduBuilder},
Err, Error, Event, Result, debug, err, info,
matrix::{StateKey, pdu::PartialPdu},
};
use futures::{FutureExt, StreamExt};
use ruma::{
CanonicalJsonObject, RoomId, RoomVersionId,
api::client::{error::ErrorKind, room::upgrade_room},
api::{client::room::upgrade_room, error::ErrorKind},
assign,
events::{
StateEventType, TimelineEventType,
room::{
create::PreviousRoom,
member::{MembershipState, RoomMemberEventContent},
power_levels::RoomPowerLevelsEventContent,
tombstone::RoomTombstoneEventContent,
@@ -19,6 +21,7 @@
space::child::{RedactedSpaceChildEventContent, SpaceChildEventContent},
},
int,
room_version_rules::RoomIdFormatVersion,
};
use serde_json::{json, value::to_raw_value};
@@ -76,7 +79,7 @@ pub(crate) async fn upgrade_room_route(
// First, check if the user has permission to upgrade the room (send tombstone
// event)
let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let old_room_state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
// Check tombstone permission by attempting to create (but not send) the event
// Note that this does internally call the policy server with a fake room ID,
@@ -85,10 +88,13 @@ pub(crate) async fn upgrade_room_route(
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: RoomId::new(services.globals.server_name()),
}),
PartialPdu::state(
StateKey::new(),
&RoomTombstoneEventContent::new(
String::new(),
RoomId::new_v1(services.globals.server_name()),
),
),
sender_user,
Some(&body.room_id),
&old_room_state_lock,
@@ -102,16 +108,19 @@ pub(crate) async fn upgrade_room_route(
drop(old_room_state_lock);
// Create a replacement room
let room_features = RoomVersion::new(&body.new_version)?;
let replacement_room_owned = if !room_features.room_ids_as_hashes {
Some(RoomId::new(services.globals.server_name()))
let room_version_rules = body
.new_version
.rules()
.expect("new room version should have defined rules");
let replacement_room_owned = if room_version_rules.room_id_format == RoomIdFormatVersion::V2 {
Some(RoomId::new_v1(services.globals.server_name()))
} else {
None
};
let replacement_room: Option<&RoomId> = replacement_room_owned.as_ref().map(AsRef::as_ref);
let replacement_room_tmp = match replacement_room {
| Some(v) => v,
| None => &RoomId::new(services.globals.server_name()),
| None => &RoomId::new_v1(services.globals.server_name()),
};
let _short_id = services
@@ -121,18 +130,21 @@ pub(crate) async fn upgrade_room_route(
.await;
// For pre-v12 rooms, send tombstone before creating replacement room
let tombstone_event_id = if !room_features.room_ids_as_hashes {
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let tombstone_event_id = if room_version_rules.room_id_format != RoomIdFormatVersion::V2 {
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
// Send a m.room.tombstone event to the old room to indicate that it is not
// intended to be used any further
let tombstone_event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.unwrap().to_owned(),
}),
PartialPdu::state(
StateKey::new(),
&RoomTombstoneEventContent::new(
"This room has been replaced".to_owned(),
replacement_room.unwrap().to_owned(),
),
),
sender_user,
Some(&body.room_id),
&state_lock,
@@ -145,7 +157,12 @@ pub(crate) async fn upgrade_room_route(
} else {
None
};
let state_lock = services.rooms.state.mutex.lock(replacement_room_tmp).await;
let state_lock = services
.rooms
.state
.mutex
.lock(replacement_room_tmp.as_str())
.await;
// Get the old room creation event
let mut create_event_content: CanonicalJsonObject = services
@@ -156,10 +173,13 @@ pub(crate) async fn upgrade_room_route(
.map_err(|_| err!(Database("Found room without m.room.create event.")))?;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(),
tombstone_event_id,
));
let predecessor = {
#[allow(deprecated, reason = "Clients still use event_id even though it's deprecated")]
Some(assign!(PreviousRoom::new(body.room_id.clone()), {
event_id: tombstone_event_id,
}))
};
// Send a m.room.create event containing a predecessor field and the applicable
// room_version
@@ -211,7 +231,7 @@ pub(crate) async fn upgrade_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&create_event_content)
.expect("event is valid, we just created it"),
@@ -227,39 +247,35 @@ pub(crate) async fn upgrade_room_route(
.boxed()
.await?;
let create_id = create_event_id.as_str().replace('$', "!");
let (replacement_room, state_lock) = if room_features.room_ids_as_hashes {
let parsed_room_id = RoomId::parse(&create_id)?;
(Some(parsed_room_id), services.rooms.state.mutex.lock(parsed_room_id).await)
} else {
(replacement_room, state_lock)
};
let (replacement_room, state_lock) =
if room_version_rules.room_id_format == RoomIdFormatVersion::V2 {
let parsed_room_id = RoomId::parse(&create_id)?;
let lock = services
.rooms
.state
.mutex
.lock(parsed_room_id.as_str())
.await;
(Some(parsed_room_id), lock)
} else {
(replacement_room.map(ToOwned::to_owned), state_lock)
};
// Join the new room
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
PartialPdu::state(
sender_user.as_str(),
&assign!(RoomMemberEventContent::new(MembershipState::Join), {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
is_direct: None,
third_party_invite: None,
blurhash: services.users.blurhash(sender_user).await.ok(),
reason: None,
join_authorized_via_users_server: None,
redact_events: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(sender_user.as_str().into()),
redacts: None,
timestamp: None,
},
}),
),
sender_user,
replacement_room,
replacement_room.as_deref(),
&state_lock,
)
.boxed()
@@ -306,14 +322,14 @@ pub(crate) async fn upgrade_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: event_type.to_string().into(),
content: event_content,
state_key: Some(StateKey::from(state_key)),
..Default::default()
},
sender_user,
replacement_room,
replacement_room.as_deref(),
&state_lock,
)
.boxed()
@@ -332,27 +348,27 @@ pub(crate) async fn upgrade_room_route(
services
.rooms
.alias
.remove_alias(alias, sender_user)
.remove_alias(&alias, sender_user)
.await?;
services
.rooms
.alias
.set_alias(alias, replacement_room.unwrap(), sender_user)?;
services.rooms.alias.set_alias(
&alias,
replacement_room.as_ref().unwrap(),
sender_user,
)?;
}
// Get the old room power levels
let power_levels_event_content: RoomPowerLevelsEventContent = services
let mut power_levels = services
.rooms
.state_accessor
.room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "")
.await
.map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?;
.get_room_power_levels(&body.room_id)
.await;
// Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(
int!(50),
power_levels_event_content
power_levels
.users_default
.checked_add(int!(1))
.ok_or_else(|| {
@@ -360,17 +376,19 @@ pub(crate) async fn upgrade_room_route(
})?,
);
power_levels.events_default = new_level;
power_levels.invite = new_level;
// Modify the power levels in the old room to prevent sending of events and
// inviting new users
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent {
events_default: new_level,
invite: new_level,
..power_levels_event_content
}),
PartialPdu::state(
StateKey::new(),
&RoomPowerLevelsEventContent::try_from(power_levels).unwrap(),
),
sender_user,
Some(&body.room_id),
&state_lock,
@@ -381,18 +399,21 @@ pub(crate) async fn upgrade_room_route(
drop(state_lock);
// For v12 rooms, send tombstone AFTER creating replacement room
if room_features.room_ids_as_hashes {
let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
if room_version_rules.room_id_format == RoomIdFormatVersion::V2 {
let old_room_state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
// For v12 rooms, no event reference in predecessor due to cyclic dependency -
// could best effort one maybe?
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.unwrap().to_owned(),
}),
PartialPdu::state(
StateKey::new(),
&RoomTombstoneEventContent::new(
"This room has been replaced".to_owned(),
replacement_room.as_ref().unwrap().to_owned(),
),
),
sender_user,
Some(&body.room_id),
&old_room_state_lock,
@@ -415,7 +436,7 @@ pub(crate) async fn upgrade_room_route(
.rooms
.state_accessor
.room_state_get_content::<SpaceChildEventContent>(
space_id,
&space_id,
&StateEventType::SpaceChild,
body.room_id.as_str(),
)
@@ -427,24 +448,24 @@ pub(crate) async fn upgrade_room_route(
debug!(
"Updating space {space_id} child event for room {} to {}",
&body.room_id,
replacement_room.unwrap()
replacement_room.as_ref().unwrap()
);
// First, drop the space's child event
let state_lock = services.rooms.state.mutex.lock(space_id).await;
let state_lock = services.rooms.state.mutex.lock(space_id.as_str()).await;
debug!("Removing space child event for room {} in space {space_id}", &body.room_id);
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: StateEventType::SpaceChild.into(),
content: to_raw_value(&RedactedSpaceChildEventContent {})
content: to_raw_value(&RedactedSpaceChildEventContent::new())
.expect("event is valid, we just created it"),
state_key: Some(body.room_id.clone().as_str().into()),
..Default::default()
},
sender_user,
Some(space_id),
Some(&space_id),
&state_lock,
)
.boxed()
@@ -453,25 +474,21 @@ pub(crate) async fn upgrade_room_route(
// Now, add a new child event for the replacement room
debug!(
"Adding space child event for room {} in space {space_id}",
replacement_room.unwrap()
replacement_room.as_ref().unwrap()
);
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: StateEventType::SpaceChild.into(),
content: to_raw_value(&SpaceChildEventContent {
via: vec![sender_user.server_name().to_owned()],
PartialPdu::state(
replacement_room.as_ref().unwrap().as_str(),
&assign!(SpaceChildEventContent::new(vec![sender_user.server_name().to_owned()]), {
order: child.order,
suggested: child.suggested,
})
.expect("event is valid, we just created it"),
state_key: Some(replacement_room.unwrap().as_str().into()),
..Default::default()
},
}),
),
sender_user,
Some(space_id),
Some(&space_id),
&state_lock,
)
.boxed()
@@ -480,13 +497,11 @@ pub(crate) async fn upgrade_room_route(
debug!(
"Finished updating space {space_id} child event for room {} to {}",
&body.room_id,
replacement_room.unwrap()
replacement_room.as_ref().unwrap()
);
drop(state_lock);
}
// Return the replacement room id
Ok(upgrade_room::v3::Response {
replacement_room: replacement_room.unwrap().to_owned(),
})
Ok(upgrade_room::v3::Response::new(replacement_room.as_ref().unwrap().to_owned()))
}
+26 -37
View File
@@ -8,13 +8,17 @@
utils::{IterStream, stream::ReadyExt},
};
use conduwuit_service::{Services, rooms::search::RoomQuery};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use ruma::{
OwnedRoomId, RoomId, UInt, UserId,
api::client::search::search_events::{
self,
v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
v3::{
Criteria, EventContextResult, ResultCategories, ResultGroupMapsByGroupingKey,
ResultRoomEvents, SearchResult,
},
},
assign,
events::AnyStateEvent,
serde::Raw,
};
@@ -41,20 +45,15 @@ pub(crate) async fn search_events_route(
) -> Result<Response> {
let sender_user = body.sender_user();
let next_batch = body.next_batch.as_deref();
let room_events_result: OptionFuture<_> = body
.search_categories
.room_events
.as_ref()
.map(|criteria| category_room_events(&services, sender_user, next_batch, criteria))
.into();
Ok(Response {
search_categories: ResultCategories {
room_events: Box::pin(room_events_result)
.await
.unwrap_or_else(|| Ok(ResultRoomEvents::default()))?,
},
})
let mut result_categories = ResultCategories::new();
if let Some(criteria) = &body.search_categories.room_events {
result_categories.room_events =
category_room_events(&services, sender_user, next_batch, criteria).await?;
}
Ok(Response::new(result_categories))
}
#[allow(clippy::map_unwrap_or)]
@@ -85,14 +84,7 @@ async fn category_room_events(
.map(IntoIterator::into_iter)
.map(IterStream::stream)
.map(StreamExt::boxed)
.unwrap_or_else(|| {
services
.rooms
.state_cache
.rooms_joined(sender_user)
.map(ToOwned::to_owned)
.boxed()
});
.unwrap_or_else(|| services.rooms.state_cache.rooms_joined(sender_user).boxed());
let results: Vec<_> = rooms
.filter_map(|room_id| async move {
@@ -129,7 +121,8 @@ async fn category_room_events(
let total: UInt = results
.iter()
.fold(0, |a: usize, (_, count, _)| a.saturating_add(*count))
.try_into()?;
.try_into()
.expect("total results should fit into a UInt");
let state: RoomStates = results
.iter()
@@ -161,16 +154,12 @@ async fn category_room_events(
pdu
})
.map(Event::into_format)
.map(|result| SearchResult {
rank: None,
result: Some(result),
context: EventContextResult {
profile_info: BTreeMap::new(), //TODO
events_after: Vec::new(), //TODO
events_before: Vec::new(), //TODO
start: None, //TODO
end: None, //TODO
},
.map(|result| {
assign!(SearchResult::new(), {
rank: None,
result: Some(result),
context: EventContextResult::default() // TODO
})
})
.collect()
.await;
@@ -186,14 +175,14 @@ async fn category_room_events(
.as_ref()
.map(ToString::to_string);
Ok(ResultRoomEvents {
Ok(assign!(ResultRoomEvents::new(), {
count: Some(total),
next_batch,
results,
state,
highlights,
groups: BTreeMap::new(), // TODO
})
groups: ResultGroupMapsByGroupingKey::default(), // TODO
}))
}
async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result<RoomState> {
+9 -9
View File
@@ -2,7 +2,7 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils};
use conduwuit::{Err, Result, err, matrix::pdu::PartialPdu, utils};
use ruma::{api::client::message::send_message_event, events::MessageLikeEventType};
use serde_json::from_str;
@@ -40,7 +40,7 @@ pub(crate) async fn send_message_event_route(
return Err!(Request(Forbidden("Encryption has been disabled")));
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
if body.event_type == MessageLikeEventType::CallInvite
&& services.rooms.directory.is_public_room(&body.room_id).await
@@ -62,11 +62,11 @@ pub(crate) async fn send_message_event_route(
)));
}
return Ok(send_message_event::v3::Response {
event_id: utils::string_from_bytes(&response)
.map(TryInto::try_into)
.map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??,
});
let event_id = utils::string_from_bytes(&response)
.map(TryInto::try_into)
.map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??;
return Ok(send_message_event::v3::Response::new(event_id));
}
let mut unsigned = BTreeMap::new();
@@ -79,7 +79,7 @@ pub(crate) async fn send_message_event_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: body.event_type.clone().into(),
content,
unsigned: Some(unsigned),
@@ -101,5 +101,5 @@ pub(crate) async fn send_message_event_route(
drop(state_lock);
Ok(send_message_event::v3::Response { event_id })
Ok(send_message_event::v3::Response::new(event_id))
}
+21 -22
View File
@@ -3,7 +3,7 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Error, Result, debug, err, info,
Err, Result, debug, err, info,
utils::{self, ReadyExt, hash, stream::BroadbandExt},
warn,
};
@@ -14,7 +14,6 @@
use ruma::{
OwnedUserId, UserId,
api::client::{
error::ErrorKind,
session::{
get_login_token,
get_login_types::{
@@ -27,8 +26,9 @@
},
logout, logout_all,
},
uiaa::UserIdentifier,
uiaa::{EmailUserIdentifier, MatrixUserIdentifier, UserIdentifier},
},
assign,
};
use service::uiaa::Identity;
@@ -48,9 +48,9 @@ pub(crate) async fn get_login_types_route(
Ok(get_login_types::v3::Response::new(vec![
get_login_types::v3::LoginType::Password(PasswordLoginType::default()),
get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()),
get_login_types::v3::LoginType::Token(TokenLoginType {
get_login_types::v3::LoginType::Token(assign!(TokenLoginType::new(), {
get_login_token: services.server.config.login_via_existing_session,
}),
})),
]))
}
@@ -168,9 +168,11 @@ pub(crate) async fn handle_login(
user: Option<&String>,
) -> Result<OwnedUserId> {
debug!("Got password login type");
let user_id_or_localpart = match (identifier, user) {
| (Some(UserIdentifier::UserIdOrLocalpart(localpart)), _) => localpart,
| (Some(UserIdentifier::Email { address }), _) => {
| (Some(UserIdentifier::Matrix(MatrixUserIdentifier { user, .. })), _)
| (None, Some(user)) => user,
| (Some(UserIdentifier::Email(EmailUserIdentifier { address, .. })), _) => {
let email = Address::try_from(address.to_owned())
.map_err(|_| err!(Request(InvalidParam("Email is malformed"))))?;
@@ -180,7 +182,6 @@ pub(crate) async fn handle_login(
.await
.ok_or_else(|| err!(Request(Forbidden("Invalid identifier or password"))))?
},
| (None, Some(user)) => user,
| _ => {
return Err!(Request(InvalidParam("Identifier type not recognized")));
},
@@ -199,11 +200,11 @@ pub(crate) async fn handle_login(
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
return Err!(Request(Unknown("User ID does not belong to this homeserver")));
return Err!(Request(InvalidParam("User ID does not belong to this homeserver")));
}
if services.users.is_locked(&user_id).await? {
return Err(Error::BadRequest(ErrorKind::UserLocked, "This account has been locked."));
return Err!(Request(UserLocked("This account has been locked.")));
}
if services.users.is_login_disabled(&user_id).await {
@@ -257,7 +258,7 @@ pub(crate) async fn login_route(
user,
..
}) => handle_login(&services, identifier.as_ref(), password, user.as_ref()).await?,
| login::v3::LoginInfo::Token(login::v3::Token { token }) => {
| login::v3::LoginInfo::Token(login::v3::Token { token, .. }) => {
debug!("Got token login type");
if !services.server.config.login_via_existing_session {
return Err!(Request(Unknown("Token login is not enabled.")));
@@ -268,6 +269,7 @@ pub(crate) async fn login_route(
| login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
identifier,
user,
..
}) => {
debug!("Got appservice login type");
@@ -276,8 +278,8 @@ pub(crate) async fn login_route(
};
let user_id =
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(user_id, &services.config.server_name)
if let Some(UserIdentifier::Matrix(MatrixUserIdentifier { user, .. })) = identifier {
UserId::parse_with_server_name(user, &services.config.server_name)
} else if let Some(user) = user {
UserId::parse_with_server_name(user, &services.config.server_name)
} else {
@@ -355,15 +357,12 @@ pub(crate) async fn login_route(
info!("{user_id} logged in");
#[allow(deprecated)]
Ok(login::v3::Response {
user_id,
access_token: token,
device_id,
Ok(assign!(login::v3::Response::new(user_id, token, device_id), {
well_known: client_discovery_info,
expires_in: None,
home_server: Some(services.config.server_name.clone()),
refresh_token: None,
})
}))
}
/// # `POST /_matrix/client/v1/login/get_token`
@@ -393,10 +392,10 @@ pub(crate) async fn login_token_route(
let login_token = utils::random_string(TOKEN_LENGTH);
let expires_in = services.users.create_login_token(sender_user, &login_token);
Ok(get_login_token::v1::Response {
expires_in: Duration::from_millis(expires_in),
Ok(get_login_token::v1::Response::new(
Duration::from_millis(expires_in),
login_token,
})
))
}
/// # `POST /_matrix/client/v3/logout`
@@ -464,7 +463,7 @@ pub(crate) async fn logout_all_route(
services
.users
.all_device_ids(sender_user)
.for_each(|device_id| services.users.remove_device(sender_user, device_id))
.for_each(async |device_id| services.users.remove_device(sender_user, &device_id).await)
.await;
services
.pusher
+28 -176
View File
@@ -1,26 +1,12 @@
use std::{
collections::{BTreeSet, VecDeque},
str::FromStr,
};
use axum::extract::State;
use conduwuit::{
Err, Result,
utils::{future::TryExtExt, stream::IterStream},
};
use conduwuit_service::{
Services,
rooms::spaces::{
PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk,
},
};
use futures::{StreamExt, TryFutureExt, future::OptionFuture};
use ruma::{
OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy,
};
use conduwuit::{Err, Result};
use ruma::{UInt, api::client::space::get_hierarchy, assign};
use service::rooms::summary::Accessibility;
use crate::Ruma;
const MAX_MAX_DEPTH: u32 = 10;
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
///
/// Paginates over the space tree in a depth-first manner to locate child rooms
@@ -29,167 +15,33 @@ pub(crate) async fn get_hierarchy_route(
State(services): State<crate::State>,
body: Ruma<get_hierarchy::v1::Request>,
) -> Result<get_hierarchy::v1::Response> {
let limit = body
.limit
.unwrap_or_else(|| UInt::from(10_u32))
.min(UInt::from(100_u32));
// We don't do pagination for this route (and therefore ignore `limit`), since
// there's no reasonable way to handle a space hierarchy changing during
// pagination.
let max_depth = body
.max_depth
.unwrap_or_else(|| UInt::from(3_u32))
.min(UInt::from(10_u32));
.map(|max_depth| max_depth.min(UInt::from(MAX_MAX_DEPTH)));
let key = body
.from
.as_ref()
.and_then(|s| PaginationToken::from_str(s).ok());
let hierarchy = services
.rooms
.summary
.get_room_hierarchy_for_user(
body.sender_user(),
body.room_id.clone(),
max_depth,
body.suggested_only,
)
.await?;
// Should prevent unexpected behaviour in (bad) clients
if let Some(ref token) = key {
if token.suggested_only != body.suggested_only || token.max_depth != max_depth {
return Err!(Request(InvalidParam(
"suggested_only and max_depth cannot change on paginated requests"
)));
}
match hierarchy {
| Accessibility::Accessible(rooms) =>
Ok(assign!(get_hierarchy::v1::Response::new(), { rooms: rooms })),
| Accessibility::Inaccessible => {
Err!(Request(Forbidden("You may not preview this room."), FORBIDDEN))
},
| Accessibility::NotFound => {
Err!(Request(Forbidden("This room does not exist."), FORBIDDEN))
},
}
get_client_hierarchy(
&services,
body.sender_user(),
&body.room_id,
limit.try_into().unwrap_or(10),
max_depth.try_into().unwrap_or(usize::MAX),
body.suggested_only,
key.as_ref()
.into_iter()
.flat_map(|t| t.short_room_ids.iter()),
)
.await
}
async fn get_client_hierarchy<'a, ShortRoomIds>(
services: &Services,
sender_user: &UserId,
room_id: &RoomId,
limit: usize,
max_depth: usize,
suggested_only: bool,
short_room_ids: ShortRoomIds,
) -> Result<get_hierarchy::v1::Response>
where
ShortRoomIds: Iterator<Item = &'a u64> + Clone + Send + Sync + 'a,
{
type Via = Vec<OwnedServerName>;
type Entry = (OwnedRoomId, Via);
type Rooms = VecDeque<Entry>;
let mut queue: Rooms = [(
room_id.to_owned(),
room_id
.server_name()
.map(ToOwned::to_owned)
.into_iter()
.collect(),
)]
.into();
let mut rooms = Vec::with_capacity(limit);
let mut parents = BTreeSet::new();
while let Some((current_room, via)) = queue.pop_front() {
let summary = services
.rooms
.spaces
.get_summary_and_children_client(&current_room, suggested_only, sender_user, &via)
.await?;
match (summary, current_room == room_id) {
| (None | Some(SummaryAccessibility::Inaccessible), false) => {
// Just ignore other unavailable rooms
},
| (None, true) => {
return Err!(Request(Forbidden("The requested room was not found")));
},
| (Some(SummaryAccessibility::Inaccessible), true) => {
return Err!(Request(Forbidden("The requested room is inaccessible")));
},
| (Some(SummaryAccessibility::Accessible(summary)), _) => {
let populate = parents.len() >= short_room_ids.clone().count();
let mut children: Vec<Entry> = get_parent_children_via(&summary, suggested_only)
.filter(|(room, _)| !parents.contains(room))
.rev()
.map(|(key, val)| (key, val.collect()))
.collect();
if populate {
rooms.push(summary_to_chunk(summary.clone()));
} else {
children = children
.iter()
.rev()
.stream()
.skip_while(|(room, _)| {
services
.rooms
.short
.get_shortroomid(room)
.map_ok(|short| {
Some(&short) != short_room_ids.clone().nth(parents.len())
})
.unwrap_or_else(|_| false)
})
.map(Clone::clone)
.collect::<Vec<Entry>>()
.await
.into_iter()
.rev()
.collect();
}
if !populate && queue.is_empty() && children.is_empty() {
break;
}
parents.insert(current_room.clone());
if rooms.len() >= limit {
break;
}
if parents.len() > max_depth {
continue;
}
queue.extend(children);
},
}
}
let next_batch: OptionFuture<_> = queue
.pop_front()
.map(|(room, _)| async move {
parents.insert(room);
let next_short_room_ids: Vec<_> = parents
.iter()
.stream()
.filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok())
.collect()
.await;
(next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty())
.then_some(PaginationToken {
short_room_ids: next_short_room_ids,
limit: limit.try_into().ok()?,
max_depth: max_depth.try_into().ok()?,
suggested_only,
})
.as_ref()
.map(PaginationToken::to_string)
})
.into();
Ok(get_hierarchy::v1::Response {
next_batch: next_batch.await.flatten(),
rooms,
})
}
+126 -119
View File
@@ -4,14 +4,16 @@
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, err,
matrix::{Event, pdu::PduBuilder},
utils::BoolExt,
matrix::{Event, pdu::PartialPdu},
};
use conduwuit_service::Services;
use futures::{FutureExt, TryStreamExt};
use ruma::{
MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
api::client::state::{get_state_events, get_state_events_for_key, send_state_event},
api::client::state::{
get_state_event_for_key::{self, v3::StateEventFormat},
get_state_events, send_state_event,
},
events::{
AnyStateEventContent, StateEventType,
room::{
@@ -24,7 +26,7 @@
},
serde::Raw,
};
use serde_json::json;
use serde_json::{json, value::to_raw_value};
use crate::{Ruma, RumaResponse};
@@ -46,23 +48,23 @@ pub(crate) async fn send_state_event_for_key_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
Ok(send_state_event::v3::Response {
event_id: send_state_event_for_key_helper(
&services,
sender_user,
&body.room_id,
&body.event_type,
&body.body.body,
&body.state_key,
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
)
.boxed()
.await?,
})
let event_id = send_state_event_for_key_helper(
&services,
sender_user,
&body.room_id,
&body.event_type,
&body.body.body,
&body.state_key,
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
)
.boxed()
.await?;
Ok(send_state_event::v3::Response::new(event_id))
}
/// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}`
@@ -100,15 +102,15 @@ pub(crate) async fn get_state_events_route(
return Err!(Request(Forbidden("You don't have permission to view the room state.")));
}
Ok(get_state_events::v3::Response {
room_state: services
.rooms
.state_accessor
.room_state_full_pdus(&body.room_id)
.map_ok(Event::into_format)
.try_collect()
.await?,
})
let room_state = services
.rooms
.state_accessor
.room_state_full_pdus(&body.room_id)
.map_ok(Event::into_format)
.try_collect()
.await?;
Ok(get_state_events::v3::Response::new(room_state))
}
/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}/{stateKey}`
@@ -119,10 +121,10 @@ pub(crate) async fn get_state_events_route(
///
/// - If not joined: Only works if current room history visibility is world
/// readable
pub(crate) async fn get_state_events_for_key_route(
pub(crate) async fn get_state_event_for_key_route(
State(services): State<crate::State>,
body: Ruma<get_state_events_for_key::v3::Request>,
) -> Result<get_state_events_for_key::v3::Response> {
body: Ruma<get_state_event_for_key::v3::Request>,
) -> Result<get_state_event_for_key::v3::Response> {
let sender_user = body.sender_user();
if !services
@@ -149,26 +151,23 @@ pub(crate) async fn get_state_events_for_key_route(
))))
})?;
let event_format = body
.format
.as_ref()
.is_some_and(|f| f.to_lowercase().eq("event"));
let content = match body.format {
| StateEventFormat::Content => event.content,
| StateEventFormat::Event => to_raw_value(&json!({
"content": event.content(),
"event_id": event.event_id(),
"origin_server_ts": event.origin_server_ts(),
"room_id": event.room_id_or_hash(),
"sender": event.sender(),
"state_key": event.state_key(),
"type": event.kind(),
"unsigned": event.unsigned(),
}))
.unwrap(),
| _ => return Err!(Request(InvalidParam("Unknown response format"))),
};
Ok(get_state_events_for_key::v3::Response {
content: event_format.or(|| event.get_content_as_value()),
event: event_format.then(|| {
json!({
"content": event.content(),
"event_id": event.event_id(),
"origin_server_ts": event.origin_server_ts(),
"room_id": event.room_id_or_hash(),
"sender": event.sender(),
"state_key": event.state_key(),
"type": event.kind(),
"unsigned": event.unsigned(),
})
}),
})
Ok(get_state_event_for_key::v3::Response::new(content))
}
/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}`
@@ -181,9 +180,9 @@ pub(crate) async fn get_state_events_for_key_route(
/// readable
pub(crate) async fn get_state_events_for_empty_key_route(
State(services): State<crate::State>,
body: Ruma<get_state_events_for_key::v3::Request>,
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
get_state_events_for_key_route(State(services), body)
body: Ruma<get_state_event_for_key::v3::Request>,
) -> Result<RumaResponse<get_state_event_for_key::v3::Response>> {
get_state_event_for_key_route(State(services), body)
.await
.map(RumaResponse)
}
@@ -204,7 +203,7 @@ async fn send_state_event_for_key_helper(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
PartialPdu {
event_type: event_type.to_string().into(),
content: serde_json::from_str(json.json().get())?,
state_key: Some(state_key.into()),
@@ -237,9 +236,16 @@ async fn allowed_to_send_state_event(
| StateEventType::RoomServerAcl => {
// prevents common ACL paw-guns as ACL management is difficult and prone to
// irreversible mistakes
match json.deserialize_as::<RoomServerAclEventContent>() {
match json.deserialize_as_unchecked::<RoomServerAclEventContent>() {
| Ok(acl_content) => {
if acl_content.allow_is_empty() {
let allow_has_wildcard = acl_content.allow.iter().any(|entry| entry == "*");
let deny_has_wildcard = acl_content.deny.iter().any(|entry| entry == "*");
let allow_has_server = acl_content
.allow
.iter()
.any(|entry| entry == services.globals.server_name().as_str());
if acl_content.allow.is_empty() {
return Err!(Request(BadJson(debug_warn!(
%room_id,
"Sending an ACL event with an empty allow key will permanently \
@@ -248,7 +254,7 @@ async fn allowed_to_send_state_event(
))));
}
if acl_content.deny_contains("*") && acl_content.allow_contains("*") {
if allow_has_wildcard && deny_has_wildcard {
return Err!(Request(BadJson(debug_warn!(
%room_id,
"Sending an ACL event with a deny and allow key value of \"*\" will \
@@ -257,9 +263,9 @@ async fn allowed_to_send_state_event(
))));
}
if acl_content.deny_contains("*")
if deny_has_wildcard
&& !acl_content.is_allowed(services.globals.server_name())
&& !acl_content.allow_contains(services.globals.server_name().as_str())
&& !allow_has_server
{
return Err!(Request(BadJson(debug_warn!(
%room_id,
@@ -269,9 +275,9 @@ async fn allowed_to_send_state_event(
))));
}
if !acl_content.allow_contains("*")
if !allow_has_wildcard
&& !acl_content.is_allowed(services.globals.server_name())
&& !acl_content.allow_contains(services.globals.server_name().as_str())
&& !allow_has_server
{
return Err!(Request(BadJson(debug_warn!(
%room_id,
@@ -297,7 +303,7 @@ async fn allowed_to_send_state_event(
// admin room is a sensitive room, it should not ever be made public
if let Ok(admin_room_id) = services.admin.get_admin_room().await {
if admin_room_id == room_id {
match json.deserialize_as::<RoomJoinRulesEventContent>() {
match json.deserialize_as_unchecked::<RoomJoinRulesEventContent>() {
| Ok(join_rule) =>
if join_rule.join_rule == JoinRule::Public {
return Err!(Request(Forbidden(
@@ -316,7 +322,7 @@ async fn allowed_to_send_state_event(
| StateEventType::RoomHistoryVisibility => {
// admin room is a sensitive room, it should not ever be made world readable
if let Ok(admin_room_id) = services.admin.get_admin_room().await {
match json.deserialize_as::<RoomHistoryVisibilityEventContent>() {
match json.deserialize_as_unchecked::<RoomHistoryVisibilityEventContent>() {
| Ok(visibility_content) => {
if admin_room_id == room_id
&& visibility_content.history_visibility
@@ -337,7 +343,7 @@ async fn allowed_to_send_state_event(
}
},
| StateEventType::RoomCanonicalAlias => {
match json.deserialize_as::<RoomCanonicalAliasEventContent>() {
match json.deserialize_as_unchecked::<RoomCanonicalAliasEventContent>() {
| Ok(canonical_alias_content) => {
let mut aliases = canonical_alias_content.alt_aliases.clone();
@@ -369,65 +375,66 @@ async fn allowed_to_send_state_event(
},
}
},
| StateEventType::RoomMember => match json.deserialize_as::<RoomMemberEventContent>() {
| Ok(mut membership_content) => {
let Ok(state_key) = UserId::parse(state_key) else {
return Err!(Request(BadJson(
"Membership event has invalid or non-existent state key"
)));
};
if let Some(authorising_user) =
membership_content.join_authorized_via_users_server
{
// join_authorized_via_users_server must be thrown away, if user is already a
// member of the room.
if services
.rooms
.state_cache
.is_joined(state_key, room_id)
.await
{
membership_content.join_authorized_via_users_server = None;
*json = Raw::<AnyStateEventContent>::from_json_string(
serde_json::to_string(&membership_content)?,
)?;
return Ok(());
}
if membership_content.membership != MembershipState::Join {
| StateEventType::RoomMember =>
match json.deserialize_as_unchecked::<RoomMemberEventContent>() {
| Ok(mut membership_content) => {
let Ok(state_key) = UserId::parse(state_key) else {
return Err!(Request(BadJson(
"join_authorised_via_users_server is only for member joins"
"Membership event has invalid or non-existent state key"
)));
}
};
if !services.globals.user_is_local(&authorising_user) {
return Err!(Request(InvalidParam(
"Authorising user {authorising_user} does not belong to this \
homeserver"
)));
}
if !services
.rooms
.state_cache
.is_joined(&authorising_user, room_id)
.await
if let Some(authorising_user) =
membership_content.join_authorized_via_users_server
{
return Err!(Request(InvalidParam(
"Authorising user {authorising_user} is not in the room, they \
cannot authorise the join."
)));
// join_authorized_via_users_server must be thrown away, if user is
// already a member of the room.
if services
.rooms
.state_cache
.is_joined(&state_key, room_id)
.await
{
membership_content.join_authorized_via_users_server = None;
*json = Raw::<AnyStateEventContent>::from_json_string(
serde_json::to_string(&membership_content)?,
)?;
return Ok(());
}
if membership_content.membership != MembershipState::Join {
return Err!(Request(BadJson(
"join_authorised_via_users_server is only for member joins"
)));
}
if !services.globals.user_is_local(&authorising_user) {
return Err!(Request(InvalidParam(
"Authorising user {authorising_user} does not belong to this \
homeserver"
)));
}
if !services
.rooms
.state_cache
.is_joined(&authorising_user, room_id)
.await
{
return Err!(Request(InvalidParam(
"Authorising user {authorising_user} is not in the room, they \
cannot authorise the join."
)));
}
}
}
},
| Err(e) => {
return Err!(Request(BadJson(
"Membership content must have a valid JSON body with at least a valid \
membership state: {e}"
)));
},
},
| Err(e) => {
return Err!(Request(BadJson(
"Membership content must have a valid JSON body with at least a valid \
membership state: {e}"
)));
},
},
| _ => (),
}
+1 -1
View File
@@ -14,7 +14,7 @@ fn test_strip_room_member() -> Result<()> {
let json: &mut Raw<AnyStateEventContent> =
&mut Raw::<AnyStateEventContent>::from_json_string(body.to_owned())?;
let mut membership_content: RoomMemberEventContent =
json.deserialize_as::<RoomMemberEventContent>()?;
json.deserialize_as_unchecked::<RoomMemberEventContent>()?;
//Begin Test
membership_content.join_authorized_via_users_server = None;
+1 -2
View File
@@ -152,8 +152,7 @@ async fn share_encrypted_room(
.rooms
.state_cache
.get_shared_rooms(sender_user, user_id)
.ready_filter(|&room_id| Some(room_id) != ignore_room)
.map(ToOwned::to_owned)
.ready_filter(|room_id| Some(room_id.as_ref()) != ignore_room)
.broad_any(|other_room_id| async move {
services
.rooms
+20 -27
View File
@@ -23,17 +23,21 @@
OwnedRoomId, OwnedUserId, RoomId, UserId,
api::client::sync::sync_events::{
UnreadNotificationsCount,
v3::{Ephemeral, JoinedRoom, RoomAccountData, RoomSummary, State as RoomState, Timeline},
v3::{
Ephemeral, JoinedRoom, RoomAccountData, RoomSummary, State as RoomState, StateEvents,
Timeline,
},
},
assign,
events::{
AnyRawAccountDataEvent, StateEventType,
StateEventType,
TimelineEventType::*,
room::member::{MembershipState, RoomMemberEventContent},
},
serde::Raw,
uint,
};
use service::rooms::short::ShortStateHash;
use service::{account_data::AnyRawAccountDataEvent, rooms::short::ShortStateHash};
use super::{load_timeline, share_encrypted_room};
use crate::client::{
@@ -92,17 +96,15 @@ pub(super) async fn load_joined_room(
);
}
let joined_room = JoinedRoom {
let joined_room = assign!(JoinedRoom::new(), {
account_data,
summary: summary.unwrap_or_default(),
unread_notifications: notification_counts.unwrap_or_default(),
timeline,
state: RoomState {
events: state_events.into_iter().map(Event::into_format).collect(),
},
state: RoomState::Before(StateEvents::with_events(state_events.into_iter().map(Event::into_format).collect())),
ephemeral,
unread_thread_notifications: BTreeMap::new(),
};
});
Ok((joined_room, device_list_updates))
}
@@ -126,7 +128,7 @@ async fn build_account_data(
.collect()
.await;
Ok(RoomAccountData { events: account_data_changes })
Ok(assign!(RoomAccountData::new(), { events: account_data_changes }))
}
/// Collect new ephemeral events.
@@ -233,7 +235,7 @@ async fn build_ephemeral(
edus.extend(typing_event);
edus.extend(private_read_event);
Ok(Ephemeral { events: edus })
Ok(assign!(Ephemeral::new(), { events: edus }))
}
/// A struct to hold the state events, timeline, and other data which is
@@ -318,11 +320,11 @@ async fn build_state_and_timeline(
Ok(StateAndTimeline {
state_events,
timeline: Timeline {
timeline: assign!(Timeline::new(), {
limited,
prev_batch: prev_batch.as_ref().map(ToString::to_string),
events: filtered_timeline,
},
}),
summary,
notification_counts,
device_list_updates,
@@ -580,10 +582,10 @@ async fn build_notification_counts(
trace!(%notification_count, %highlight_count, "syncing new notification counts");
Ok(Some(UnreadNotificationsCount {
Ok(Some(assign!(UnreadNotificationsCount::new(), {
notification_count: Some(notification_count),
highlight_count: Some(highlight_count),
}))
})))
} else {
Ok(None)
}
@@ -698,13 +700,13 @@ async fn build_room_summary(
"syncing updated summary"
);
Ok(Some(RoomSummary {
Ok(Some(assign!(RoomSummary::new(), {
heroes: heroes
.map(|heroes| heroes.into_iter().collect())
.unwrap_or_default(),
joined_member_count: Some(ruma_from_u64(joined_member_count)),
invited_member_count: Some(ruma_from_u64(invited_member_count)),
}))
})))
}
/// Fetch the user IDs to include in the `m.heroes` property of the room
@@ -718,18 +720,10 @@ async fn build_heroes(
const MAX_HERO_COUNT: usize = 5;
// fetch joined members from the state cache first
let joined_members_stream = services
.rooms
.state_cache
.room_members(room_id)
.map(ToOwned::to_owned);
let joined_members_stream = services.rooms.state_cache.room_members(room_id);
// then fetch invited members
let invited_members_stream = services
.rooms
.state_cache
.room_members_invited(room_id)
.map(ToOwned::to_owned);
let invited_members_stream = services.rooms.state_cache.room_members_invited(room_id);
// then as a last resort fetch every membership event
let all_members_stream = services
@@ -796,7 +790,6 @@ async fn build_device_list_updates(
.users
.room_keys_changed(room_id, last_sync_end_count, Some(current_count))
.map(at!(0))
.map(ToOwned::to_owned)
.ready_for_each(|user_id| {
device_list_updates.changed.insert(user_id);
})
+11 -10
View File
@@ -7,7 +7,10 @@
use futures::{StreamExt, future::join};
use ruma::{
EventId, OwnedRoomId, RoomId,
api::client::sync::sync_events::v3::{LeftRoom, RoomAccountData, State, Timeline},
api::client::sync::sync_events::v3::{
LeftRoom, RoomAccountData, State, StateEvents, Timeline,
},
assign,
events::{StateEventType, TimelineEventType},
uint,
};
@@ -178,17 +181,15 @@ pub(super) async fn load_left_room(
.collect::<Vec<_>>()
.await;
Ok(Some(LeftRoom {
account_data: RoomAccountData { events: Vec::new() },
timeline: Timeline {
Ok(Some(assign!(LeftRoom::new(), {
account_data: RoomAccountData::new(),
timeline: assign!(Timeline::new(), {
limited: timeline.limited,
prev_batch: Some(current_count.to_string()),
events: raw_timeline_pdus,
},
state: State {
events: state_events.into_iter().map(Event::into_format).collect(),
},
}))
}),
state: State::Before(StateEvents::with_events(state_events.into_iter().map(Event::into_format).collect())),
})))
}
async fn build_left_state_and_timeline(
@@ -317,7 +318,7 @@ fn create_dummy_leave_event(
// clients. perhaps a database table could be created to hold these dummy
// events, or they could be stored as outliers?
PduEvent {
event_id: EventId::new(services.globals.server_name()),
event_id: EventId::new_v1(services.globals.server_name()),
sender: syncing_user.to_owned(),
origin: None,
origin_server_ts: utils::millis_since_unix_epoch()
+64 -52
View File
@@ -11,7 +11,7 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Result, at, extract_variant,
Err, Result, at, extract_variant,
utils::{
ReadyExt, TryFutureExtExt,
stream::{BroadbandExt, Tools, WidebandExt},
@@ -19,10 +19,7 @@
warn,
};
use conduwuit_service::Services;
use futures::{
FutureExt, StreamExt, TryFutureExt,
future::{OptionFuture, join3, join4, join5},
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture};
use ruma::{
DeviceId, OwnedUserId, RoomId, UserId,
api::client::{
@@ -34,19 +31,19 @@
Presence, Rooms, ToDevice,
},
},
uiaa::UiaaResponse,
},
events::{
AnyRawAccountDataEvent,
presence::{PresenceEvent, PresenceEventContent},
},
assign,
events::presence::{PresenceEvent, PresenceEventContent},
serde::Raw,
};
use service::rooms::lazy_loading::{self, MemberSet, Options as _};
use service::{
account_data::AnyRawAccountDataEvent,
rooms::lazy_loading::{self, MemberSet, Options as _},
};
use super::{load_timeline, share_encrypted_room};
use crate::{
Ruma, RumaResponse,
Ruma,
client::{
is_ignored_invite,
sync::v3::{joined::load_joined_room, left::load_left_room},
@@ -82,10 +79,10 @@ fn is_empty(&self) -> bool { self.changed.is_empty() && self.left.is_empty() }
impl From<DeviceListUpdates> for DeviceLists {
fn from(val: DeviceListUpdates) -> Self {
Self {
assign!(Self::new(), {
changed: val.changed.into_iter().collect(),
left: val.left.into_iter().collect(),
}
})
}
}
@@ -183,7 +180,7 @@ pub(crate) async fn sync_events_route(
State(services): State<crate::State>,
ClientIp(client_ip): ClientIp,
body: Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
) -> Result<sync_events::v3::Response> {
let (sender_user, sender_device) = body.sender();
// Presence update
@@ -227,7 +224,7 @@ pub(crate) async fn sync_events_route(
pub(crate) async fn build_sync_events(
services: &Services,
body: &Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
) -> Result<sync_events::v3::Response> {
let (syncing_user, syncing_device) = body.sender();
let current_count = services.globals.current_count()?;
@@ -253,6 +250,8 @@ pub(crate) async fn build_sync_events(
.get_filter(syncing_user, filter_id)
.await
.unwrap_or_default(),
// error out for unknown filter types
| _ => return Err!(Request(InvalidParam("Unknown filter type"))),
});
let context = SyncContext {
@@ -268,7 +267,6 @@ pub(crate) async fn build_sync_events(
.rooms
.state_cache
.rooms_joined(syncing_user)
.map(ToOwned::to_owned)
.broad_filter_map(|room_id| async {
let joined_room = load_joined_room(services, context, room_id.clone()).await;
@@ -332,9 +330,9 @@ pub(crate) async fn build_sync_events(
// only sync this invite if it was sent after the last /sync call
if last_sync_end_count < invite_count {
let invited_room = InvitedRoom {
invite_state: InviteState { events: invite_state },
};
let invited_room = assign!(InvitedRoom::new(), {
invite_state: InviteState::from(invite_state),
});
invited_rooms.insert(room_id, invited_room);
}
@@ -355,9 +353,9 @@ pub(crate) async fn build_sync_events(
// only sync this knock if it was sent after the last /sync call
if last_sync_end_count < knock_count {
let knocked_room = KnockedRoom {
knock_state: KnockState { events: knock_state },
};
let knocked_room = assign!(KnockedRoom::new(), {
knock_state: assign!(KnockState::new(), { events: knock_state }),
});
knocked_rooms.insert(room_id, knocked_room);
}
@@ -376,13 +374,6 @@ pub(crate) async fn build_sync_events(
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
.collect();
// Look for device list updates of this account
let keys_changed = services
.users
.keys_changed(syncing_user, last_sync_end_count, Some(current_count))
.map(ToOwned::to_owned)
.collect::<HashSet<_>>();
let to_device_events = services
.users
.get_to_device_events(
@@ -394,36 +385,57 @@ pub(crate) async fn build_sync_events(
.map(at!(1))
.collect::<Vec<_>>();
// Look for device list updates of this account
let keys_changed = services
.users
.keys_changed(syncing_user, last_sync_end_count, Some(current_count))
.collect::<HashSet<_>>();
let device_one_time_keys_count = services
.users
.count_one_time_keys(syncing_user, syncing_device);
// Remove all to-device events the device received *last time*
let remove_to_device_events =
services
.users
.remove_to_device_events(syncing_user, syncing_device, last_sync_end_count);
let (
(joined_rooms, mut device_list_updates),
left_rooms,
invited_rooms,
knocked_rooms,
presence_updates,
account_data,
to_device_events,
keys_changed,
device_one_time_keys_count,
) = async {
futures::join!(
joined_rooms,
left_rooms,
invited_rooms,
knocked_rooms,
presence_updates,
account_data,
to_device_events,
keys_changed,
device_one_time_keys_count
)
}
.boxed()
.await;
let rooms = join4(joined_rooms, left_rooms, invited_rooms, knocked_rooms);
let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates);
let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms)
.boxed()
// Remove all to-device events the device received *last time*
services
.users
.remove_to_device_events(syncing_user, syncing_device, last_sync_end_count)
.await;
let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top;
let ((), to_device_events, presence_updates) = ephemeral;
let (joined_rooms, left_rooms, invited_rooms, knocked_rooms) = rooms;
let (joined_rooms, mut device_list_updates) = joined_rooms;
device_list_updates.changed.extend(keys_changed);
let response = sync_events::v3::Response {
account_data: GlobalAccountData { events: account_data },
let response = assign!(sync_events::v3::Response::new(current_count.to_string()), {
account_data: assign!(GlobalAccountData::new(), { events: account_data }),
device_lists: device_list_updates.into(),
device_one_time_keys_count,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
next_batch: current_count.to_string(),
presence: Presence {
presence: assign!(Presence::new(), {
events: presence_updates
.into_iter()
.flat_map(IntoIterator::into_iter)
@@ -431,15 +443,15 @@ pub(crate) async fn build_sync_events(
.map(|ref event| Raw::new(event))
.filter_map(Result::ok)
.collect(),
},
rooms: Rooms {
}),
rooms: assign!(Rooms::new(), {
leave: left_rooms,
join: joined_rooms,
invite: invited_rooms,
knock: knocked_rooms,
},
to_device: ToDevice { events: to_device_events },
};
}),
to_device: assign!(ToDevice::new(), { events: to_device_events }),
});
Ok(response)
}
+115 -111
View File
@@ -27,17 +27,20 @@
};
use ruma::{
DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
api::client::sync::sync_events::{
self, DeviceLists, UnreadNotificationsCount, v5::request::ExtensionRoomConfig,
},
assign,
directory::RoomTypeFilter,
events::{
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, AnySyncStateEvent, StateEventType,
TimelineEventType,
AnySyncEphemeralRoomEvent, AnySyncStateEvent, StateEventType, TimelineEventType,
room::member::{MembershipState, RoomMemberEventContent},
typing::TypingEventContent,
typing::{SyncTypingEvent, TypingEventContent},
},
serde::Raw,
uint,
};
use service::account_data::AnyRawAccountDataEvent;
use super::share_encrypted_room;
use crate::{
@@ -67,8 +70,8 @@ pub(crate) async fn sync_events_v5_route(
body: Ruma<sync_events::v5::Request>,
) -> Result<sync_events::v5::Response> {
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let ref sender_user = body.sender_user().to_owned();
let ref sender_device = body.sender_device().to_owned();
services
.users
@@ -90,7 +93,7 @@ pub(crate) async fn sync_events_v5_route(
.and_then(|string| string.parse().ok())
.unwrap_or(0);
let snake_key = into_snake_key(sender_user, sender_device, conn_id);
let snake_key = into_snake_key(sender_user.as_ref(), sender_device.as_str(), conn_id);
if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) {
return Err!(Request(UnknownPos(
@@ -112,7 +115,6 @@ pub(crate) async fn sync_events_v5_route(
.rooms
.state_cache
.rooms_joined(sender_user)
.map(ToOwned::to_owned)
.collect::<Vec<OwnedRoomId>>();
let all_invited_rooms = services
@@ -164,21 +166,20 @@ pub(crate) async fn sync_events_v5_route(
let (account_data, e2ee, to_device, receipts) =
try_join4(account_data, e2ee, to_device, receipts).await?;
let extensions = sync_events::v5::response::Extensions {
let extensions = assign!(sync_events::v5::response::Extensions::default(), {
account_data,
e2ee,
to_device,
receipts,
typing: sync_events::v5::response::Typing::default(),
};
});
let mut response = sync_events::v5::Response {
let mut response = assign!(sync_events::v5::Response::new(pos), {
txn_id: body.txn_id.clone(),
pos,
lists: BTreeMap::new(),
rooms: BTreeMap::new(),
extensions,
};
});
handle_lists(
services,
@@ -379,11 +380,12 @@ async fn handle_lists<'a, Rooms, AllRooms>(
);
}
}
response
.lists
.insert(list_id.clone(), sync_events::v5::response::List {
response.lists.insert(
list_id.clone(),
assign!(sync_events::v5::response::List::default(), {
count: ruma_from_usize(active_rooms.len()),
});
}),
);
if let Some(conn_id) = body.conn_id.clone() {
let snake_key = into_snake_key(sender_user, sender_device, conn_id);
@@ -563,17 +565,19 @@ async fn process_rooms<'a, Rooms>(
.state_cache
.room_members(room_id)
.ready_filter(|member| *member != sender_user)
.filter_map(|user_id| {
.filter_map(async |user_id| {
services
.rooms
.state_accessor
.get_member(room_id, user_id)
.map_ok(|memberevent| sync_events::v5::response::Hero {
user_id: user_id.into(),
name: memberevent.displayname,
avatar: memberevent.avatar_url,
.get_member(room_id, &user_id)
.map_ok(|member_event| {
assign!(sync_events::v5::response::Hero::new(user_id.clone()), {
name: member_event.displayname,
avatar: member_event.avatar_url,
})
})
.ok()
.await
})
.take(5)
.collect()
@@ -609,73 +613,76 @@ async fn process_rooms<'a, Rooms>(
None
};
rooms.insert(room_id.clone(), sync_events::v5::response::Room {
name: services
.rooms
.state_accessor
.get_name(room_id)
.await
.ok()
.or(name),
avatar: match heroes_avatar {
| Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar),
| _ => match services.rooms.state_accessor.get_avatar(room_id).await {
| ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
| ruma::JsOption::Null => ruma::JsOption::Null,
| ruma::JsOption::Undefined => ruma::JsOption::Undefined,
rooms.insert(
room_id.clone(),
assign!(sync_events::v5::response::Room::new(), {
name: services
.rooms
.state_accessor
.get_name(room_id)
.await
.ok()
.or(name),
avatar: match heroes_avatar {
| Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar),
| _ => match services.rooms.state_accessor.get_avatar(room_id).await {
| ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
| ruma::JsOption::Null => ruma::JsOption::Null,
| ruma::JsOption::Undefined => ruma::JsOption::Undefined,
},
},
},
initial: Some(roomsince == &0),
is_dm: None,
invite_state,
unread_notifications: UnreadNotificationsCount {
highlight_count: Some(
initial: Some(roomsince == &0),
is_dm: None,
invite_state,
unread_notifications: assign!(UnreadNotificationsCount::new(), {
highlight_count: Some(
services
.rooms
.user
.highlight_count(sender_user, room_id)
.await
.try_into()
.expect("notification count can't go that high"),
),
notification_count: Some(
services
.rooms
.user
.notification_count(sender_user, room_id)
.await
.try_into()
.expect("notification count can't go that high"),
),
}),
timeline: room_events,
required_state,
prev_batch,
limited,
joined_count: Some(
services
.rooms
.user
.highlight_count(sender_user, room_id)
.state_cache
.room_joined_count(room_id)
.await
.unwrap_or(0)
.try_into()
.expect("notification count can't go that high"),
.unwrap_or_else(|_| uint!(0)),
),
notification_count: Some(
invited_count: Some(
services
.rooms
.user
.notification_count(sender_user, room_id)
.state_cache
.room_invited_count(room_id)
.await
.unwrap_or(0)
.try_into()
.expect("notification count can't go that high"),
.unwrap_or_else(|_| uint!(0)),
),
},
timeline: room_events,
required_state,
prev_batch,
limited,
joined_count: Some(
services
.rooms
.state_cache
.room_joined_count(room_id)
.await
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
services
.rooms
.state_cache
.room_invited_count(room_id)
.await
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
bump_stamp: timestamp,
heroes: Some(heroes),
});
num_live: None, // Count events in timeline greater than global sync counter
bump_stamp: timestamp,
heroes: Some(heroes),
}),
);
}
Ok(rooms)
}
@@ -737,7 +744,8 @@ async fn collect_typing_events(
let rooms: Vec<_> = body.extensions.typing.rooms.clone().unwrap_or_else(|| {
body.room_subscriptions
.keys()
.map(ToOwned::to_owned)
.cloned()
.map(ExtensionRoomConfig::Room)
.collect()
});
let lists: Vec<_> = body
@@ -766,9 +774,7 @@ async fn collect_typing_events(
| Ok(typing_users) => {
typing_response.rooms.insert(
room_id.to_owned(), // Already OwnedRoomId
Raw::new(&sync_events::v5::response::SyncTypingEvent {
content: TypingEventContent::new(typing_users),
})?,
Raw::new(&SyncTypingEvent::new(TypingEventContent::new(typing_users)))?,
);
},
| Err(e) => {
@@ -784,10 +790,7 @@ async fn collect_account_data(
services: &Services,
(sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request),
) -> sync_events::v5::response::AccountData {
let mut account_data = sync_events::v5::response::AccountData {
global: Vec::new(),
rooms: BTreeMap::new(),
};
let mut account_data = sync_events::v5::response::AccountData::default();
if !body.extensions.account_data.enabled.unwrap_or(false) {
return sync_events::v5::response::AccountData::default();
@@ -802,15 +805,17 @@ async fn collect_account_data(
if let Some(rooms) = &body.extensions.account_data.rooms {
for room in rooms {
account_data.rooms.insert(
room.clone(),
services
.account_data
.changes_since(Some(room), sender_user, Some(globalsince), None)
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect()
.await,
);
if let ExtensionRoomConfig::Room(room) = room {
account_data.rooms.insert(
room.clone(),
services
.account_data
.changes_since(Some(room.as_ref()), sender_user, Some(globalsince), None)
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect()
.await,
);
}
}
}
@@ -841,7 +846,6 @@ async fn collect_e2ee<'a, Rooms>(
services
.users
.keys_changed(sender_user, Some(globalsince), None)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
@@ -932,18 +936,18 @@ async fn collect_e2ee<'a, Rooms>(
if !share_encrypted_room(
services,
sender_user,
user_id,
&user_id,
Some(room_id),
)
.await
{
device_list_changes.insert(user_id.to_owned());
device_list_changes.insert(user_id.clone());
}
},
| MembershipState::Leave => {
// Write down users that have left encrypted rooms we
// are in
left_encrypted_users.insert(user_id.to_owned());
left_encrypted_users.insert(user_id.clone());
},
| _ => {},
}
@@ -962,9 +966,10 @@ async fn collect_e2ee<'a, Rooms>(
.ready_filter(|user_id| sender_user != *user_id)
// Only send keys if the sender doesn't share an encrypted room with the target
// already
.filter_map(|user_id| {
share_encrypted_room(services, sender_user, user_id, Some(room_id))
.map(|res| res.or_some(user_id.to_owned()))
.filter_map(async |user_id| {
share_encrypted_room(services, sender_user, &user_id, Some(room_id))
.map(|res| res.or_some(user_id.clone()))
.await
})
.collect::<Vec<_>>()
.await,
@@ -978,7 +983,6 @@ async fn collect_e2ee<'a, Rooms>(
.users
.room_keys_changed(room_id, Some(globalsince), None)
.map(|(user_id, _)| user_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
@@ -995,7 +999,7 @@ async fn collect_e2ee<'a, Rooms>(
}
}
Ok(sync_events::v5::response::E2EE {
Ok(assign!(sync_events::v5::response::E2EE::default(), {
device_unused_fallback_key_types: None,
device_one_time_keys_count: services
@@ -1003,11 +1007,11 @@ async fn collect_e2ee<'a, Rooms>(
.count_one_time_keys(sender_user, sender_device)
.await,
device_lists: DeviceLists {
device_lists: assign!(DeviceLists::new(), {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
})
}),
}))
}
async fn collect_to_device(
@@ -1024,7 +1028,7 @@ async fn collect_to_device(
.remove_to_device_events(sender_user, sender_device, globalsince)
.await;
Some(sync_events::v5::response::ToDevice {
Some(assign!(sync_events::v5::response::ToDevice::default(), {
next_batch: next_batch.to_string(),
events: services
.users
@@ -1032,12 +1036,12 @@ async fn collect_to_device(
.map(at!(1))
.collect()
.await,
})
}))
}
async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts {
sync_events::v5::response::Receipts { rooms: BTreeMap::new() }
// TODO: get explicitly requested read receipts
sync_events::v5::response::Receipts::default()
}
fn filter_rooms<'a, Rooms>(
+6 -12
View File
@@ -27,9 +27,7 @@ pub(crate) async fn update_tag_route(
.account_data
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
tags_event
.content
@@ -46,7 +44,7 @@ pub(crate) async fn update_tag_route(
)
.await?;
Ok(create_tag::v3::Response {})
Ok(create_tag::v3::Response::new())
}
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
@@ -64,9 +62,7 @@ pub(crate) async fn delete_tag_route(
.account_data
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
tags_event.content.tags.remove(&body.tag.clone().into());
@@ -80,7 +76,7 @@ pub(crate) async fn delete_tag_route(
)
.await?;
Ok(delete_tag::v3::Response {})
Ok(delete_tag::v3::Response::new())
}
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
@@ -98,9 +94,7 @@ pub(crate) async fn get_tags_route(
.account_data
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent { tags: BTreeMap::new() },
});
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
Ok(get_tags::v3::Response { tags: tags_event.content.tags })
Ok(get_tags::v3::Response::new(tags_event.content.tags))
}
+2 -2
View File
@@ -11,8 +11,8 @@
pub(crate) async fn get_protocols_route(
_body: Ruma<get_protocols::v3::Request>,
) -> Result<get_protocols::v3::Response> {
// TODO
Ok(get_protocols::v3::Response { protocols: BTreeMap::new() })
// We don't support this at all
Ok(get_protocols::v3::Response::new(BTreeMap::new()))
}
/// # `GET /_matrix/client/unstable/thirdparty/protocols`
+14 -14
View File
@@ -7,7 +7,7 @@
},
};
use futures::StreamExt;
use ruma::{api::client::threads::get_threads, uint};
use ruma::{api::client::threads::get_threads, assign, uint};
use crate::Ruma;
@@ -59,18 +59,18 @@ pub(crate) async fn get_threads_route(
.collect()
.await;
Ok(get_threads::v1::Response {
next_batch: threads
.last()
.filter(|_| threads.len() >= limit)
.map(at!(0))
.as_ref()
.map(ToString::to_string),
let next_batch = threads
.last()
.filter(|_| threads.len() >= limit)
.map(at!(0))
.as_ref()
.map(ToString::to_string);
chunk: threads
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect(),
})
let chunk = threads
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect();
Ok(assign!(get_threads::v1::Response::new(chunk), { next_batch }))
}
+31 -26
View File
@@ -1,14 +1,15 @@
use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{Error, Result};
use conduwuit::{Err, Result};
use conduwuit_service::sending::EduBuf;
use futures::StreamExt;
use ruma::{
api::{
client::{error::ErrorKind, to_device::send_event_to_device},
federation::{self, transactions::edu::DirectDeviceContent},
client::to_device::send_event_to_device,
federation::transactions::edu::{DirectDeviceContent, Edu},
},
assign,
to_device::DeviceIdOrAllDevices,
};
@@ -31,14 +32,14 @@ pub(crate) async fn send_event_to_device_route(
.await
.is_ok()
{
return Ok(send_event_to_device::v3::Response {});
return Ok(send_event_to_device::v3::Response::new());
}
for (target_user_id, map) in &body.messages {
for (target_device_id_maybe, event) in map {
for (target_device, event) in map {
if !services.globals.user_is_local(target_user_id) {
let mut map = BTreeMap::new();
map.insert(target_device_id_maybe.clone(), event.clone());
map.insert(target_device.clone(), event.clone());
let mut messages = BTreeMap::new();
messages.insert(target_user_id.clone(), map);
let count = services.globals.next_count()?;
@@ -46,12 +47,14 @@ pub(crate) async fn send_event_to_device_route(
let mut buf = EduBuf::new();
serde_json::to_writer(
&mut buf,
&federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent {
sender: sender_user.to_owned(),
ev_type: body.event_type.clone(),
message_id: count.to_string().into(),
messages,
}),
&Edu::DirectToDevice(assign!(
DirectDeviceContent::new(
sender_user.to_owned(),
body.event_type.clone(),
count.to_string().into(),
),
{ messages }
)),
)
.expect("DirectToDevice EDU can be serialized");
@@ -64,11 +67,11 @@ pub(crate) async fn send_event_to_device_route(
let event_type = &body.event_type.to_string();
let event = event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?;
let Ok(event) = event.deserialize_as() else {
return Err!(Request(InvalidParam("Failed to deserialize event body.")));
};
match target_device_id_maybe {
match target_device {
| DeviceIdOrAllDevices::DeviceId(target_device_id) => {
services
.users
@@ -81,20 +84,22 @@ pub(crate) async fn send_event_to_device_route(
)
.await;
},
| DeviceIdOrAllDevices::AllDevices => {
let (event_type, event) = (&event_type, &event);
services
.users
.all_device_ids(target_user_id)
.for_each(|target_device_id| {
services.users.add_to_device_event(
sender_user,
target_user_id,
target_device_id,
event_type,
event.clone(),
)
.for_each(async |target_device_id| {
services
.users
.add_to_device_event(
sender_user,
target_user_id,
&target_device_id,
event_type,
event.clone(),
)
.await;
})
.await;
},
@@ -107,5 +112,5 @@ pub(crate) async fn send_event_to_device_route(
.transactions
.add_client_txnid(sender_user, sender_device, &body.txn_id, &[]);
Ok(send_event_to_device::v3::Response {})
Ok(send_event_to_device::v3::Response::new())
}
+4 -4
View File
@@ -1,7 +1,7 @@
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{Err, Result, utils, utils::math::Tried};
use ruma::api::client::typing::create_typing_event;
use ruma::api::client::typing::create_typing_event::{self, v3::TypingInfo};
use crate::Ruma;
@@ -34,9 +34,9 @@ pub(crate) async fn create_typing_event_route(
}
if services.config.allow_local_typing && !services.users.is_suspended(sender_user).await? {
match body.state {
| Typing::Yes(duration) => {
| Typing::Yes(TypingInfo { timeout, .. }) => {
let duration = utils::clamp(
duration.as_millis().try_into().unwrap_or(u64::MAX),
timeout.as_millis().try_into().unwrap_or(u64::MAX),
services
.server
.config
@@ -78,5 +78,5 @@ pub(crate) async fn create_typing_event_route(
.await?;
}
Ok(create_typing_event::v3::Response {})
Ok(create_typing_event::v3::Response::new())
}

Some files were not shown because too many files have changed in this diff Show More