mirror of
https://forgejo.ellis.link/continuwuation/continuwuity/
synced 2026-05-13 16:53:08 +00:00
Compare commits
199 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3b66ff6b9e | |||
| e349dd284f | |||
| c57fe66d8d | |||
| ff28fd0927 | |||
| 7307f2dc80 | |||
| 6f56b665e7 | |||
| 7018ce4180 | |||
| 10dd8bebfe | |||
| 1658b3bf6c | |||
| 088fa3e725 | |||
| 4694186c97 | |||
| a5c61d5137 | |||
| 39a882c4a1 | |||
| f091d3a732 | |||
| ebf9a08cd1 | |||
| 4fef0a7ff2 | |||
| 2f37b446bc | |||
| 6185841b6a | |||
| 3e0d4b066e | |||
| 0d2eeed567 | |||
| b296720540 | |||
| d600aed8db | |||
| 9724953b5e | |||
| 1605176956 | |||
| 2b0aedf5fd | |||
| c78c431703 | |||
| 49b48b857d | |||
| bf1e42b225 | |||
| ec76a234db | |||
| 091514e9f9 | |||
| 789ad499f7 | |||
| 1e6eaa4337 | |||
| de97900b07 | |||
| cb68a3d0ae | |||
| d3852abe51 | |||
| 15845b1c55 | |||
| f7d558baa6 | |||
| edd80b2600 | |||
| 03eab32c27 | |||
| 636de8a708 | |||
| e212c91ebf | |||
| 83f3314f08 | |||
| 8c2cf67783 | |||
| 7436e2f4e1 | |||
| 9ba406761b | |||
| 97f49d6357 | |||
| 1a49bc6f87 | |||
| 833216256b | |||
| 5fa3087401 | |||
| e95c0bd53f | |||
| 52d1ed24a9 | |||
| 4c1638e495 | |||
| 3f69cf8ed7 | |||
| 560a615c29 | |||
| 2e19310a87 | |||
| 81c5c6b2bc | |||
| 73d8462ace | |||
| 8b5fda1fb5 | |||
| 6f9b4a989e | |||
| fe0d83d447 | |||
| 37dccdbeb0 | |||
| 1060adc670 | |||
| d963b89a07 | |||
| 680c972b44 | |||
| 88b59eb053 | |||
| 4a99de0d28 | |||
| 0e1f0683c6 | |||
| cec4abc7cd | |||
| e6cae5b8ed | |||
| 02ccf64d2e | |||
| 4d4d875231 | |||
| cdf05b9a8b | |||
| 9491be928d | |||
| 049babc7ca | |||
| 7b99757337 | |||
| d09de005e3 | |||
| e34fd76dc0 | |||
| 72dfe579ec | |||
| cfae9a34f4 | |||
| 0a4808ea79 | |||
| a9a18fc5f0 | |||
| c1434c7935 | |||
| 2e98ba3ed8 | |||
| 551cf48642 | |||
| d256a1c1fa | |||
| 5578144da9 | |||
| 5309a064e8 | |||
| 56d35b4e39 | |||
| 7375d1cad4 | |||
| 80baf948ae | |||
| ed37696cef | |||
| 0a04c60f31 | |||
| e44ac230a6 | |||
| 57c4567380 | |||
| a8a8e1ea51 | |||
| 02f69a7160 | |||
| f68205a341 | |||
| 9899632b8b | |||
| a0524a9566 | |||
| e70004c98f | |||
| e185f56f3a | |||
| 5058b7979a | |||
| 7f06a61242 | |||
| 54fefb421b | |||
| 9d39321deb | |||
| c64a4a71bc | |||
| 385b4b10d1 | |||
| c12dd20431 | |||
| 3ad7c3b30d | |||
| 7a58074a0d | |||
| 0c7abd792d | |||
| 0f64e6b49c | |||
| e7a1c71a25 | |||
| cd3b97ea26 | |||
| 845b731f8c | |||
| 97d2388717 | |||
| 962a4aedc6 | |||
| 0eee63f7a1 | |||
| eba38c2fa0 | |||
| 338cdc2a75 | |||
| 2dacb8e071 | |||
| 398f73b690 | |||
| 78d9c29a05 | |||
| 0406f755c2 | |||
| 1827888f09 | |||
| 8871b1f74b | |||
| c7489fd008 | |||
| 7f5f4df64e | |||
| 15d87c00bf | |||
| 7cae42634e | |||
| bd94ec4033 | |||
| db7d378a2e | |||
| 39b2e461be | |||
| ca358438ee | |||
| 4282d60181 | |||
| 10dbea72e8 | |||
| aa7c2ea1ad | |||
| 698d959407 | |||
| 4c831c3531 | |||
| 4dfdce303f | |||
| 8d8c310a64 | |||
| e50e24e22d | |||
| a215b63077 | |||
| 1d39210a0c | |||
| 360e0dada8 | |||
| cbf24a9483 | |||
| 6cb3f909c9 | |||
| b7c9ef89f0 | |||
| 64f7791ddb | |||
| 836047b54e | |||
| 256f8f679d | |||
| 154cda35f3 | |||
| 1bf6d2a117 | |||
| 69d33931fa | |||
| 83902a584b | |||
| bcff259875 | |||
| 496ca80393 | |||
| 34b992fc40 | |||
| 1ea9330df8 | |||
| 267e1c5d65 | |||
| 36285e7784 | |||
| 53ab20d1cd | |||
| 96adf034e6 | |||
| a75bf32a34 | |||
| c89ecd7b63 | |||
| 7f30f8419b | |||
| 0a81f4d629 | |||
| 4e456249ac | |||
| 01e403f05f | |||
| a2f6141f4b | |||
| 97a01a1500 | |||
| bf9c9716eb | |||
| 471eb54c66 | |||
| 755006c66d | |||
| ccd6072f2d | |||
| 24f7e1d658 | |||
| d62eeda130 | |||
| 3e1f97487f | |||
| a4e64383b7 | |||
| 204bc1367e | |||
| 1cc9dbf2a4 | |||
| 2cf28baf03 | |||
| f3fb218652 | |||
| 0924b7d27e | |||
| 8575f191a0 | |||
| fe7cfd96e7 | |||
| 8b0e86a05d | |||
| 8b8fef998c | |||
| decd6083a0 | |||
| 06184d8c9f | |||
| 7c20e22b75 | |||
| 3f862b58cb | |||
| 046a6356f3 | |||
| 3af0240ff5 | |||
| 5dcfff51cf | |||
| b9989f1713 | |||
| 1d3e3e7e62 | |||
| 0adf3aa956 | |||
| 7b1aabda9f |
@@ -71,7 +71,7 @@ runs:
|
||||
|
||||
- name: Install timelord-cli and git-warp-time
|
||||
if: steps.check-binaries.outputs.need-install == 'true'
|
||||
uses: https://github.com/taiki-e/install-action@74e87cbfa15a59692b158178d8905a61bf6fca95 # v2
|
||||
uses: https://github.com/taiki-e/install-action@b5fddbb5361bce8a06fb168c9d403a6cc552b084 # v2
|
||||
with:
|
||||
tool: git-warp-time,timelord-cli@3.0.1
|
||||
|
||||
|
||||
@@ -45,7 +45,6 @@
|
||||
- [ ] I have [tested my contribution][c1t] (or proof-read it for documentation-only changes)
|
||||
myself, if applicable. This includes ensuring code compiles.
|
||||
- [ ] My commit messages follow the [commit message format][c1cm] and are descriptive.
|
||||
- [ ] I have written a [news fragment][n1] for this PR, if applicable<!--(can be done after hitting open!)-->.
|
||||
|
||||
<!--
|
||||
Notes on these requirements:
|
||||
@@ -79,4 +78,3 @@
|
||||
[c1pc]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#pre-commit-checks
|
||||
[c1t]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#running-tests-locally
|
||||
[c1cm]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#commit-messages
|
||||
[n1]: https://towncrier.readthedocs.io/en/stable/tutorial.html#creating-news-fragments
|
||||
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
if [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9]$ ]]; then
|
||||
# Use the "stable" component for tagged semver releases
|
||||
COMPONENT="stable"
|
||||
elif [[ ${{ forge.ref }} =~ ^refs/tags/^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
|
||||
elif [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
|
||||
# Use the "unstable" component for tagged semver pre-releases
|
||||
COMPONENT="unstable"
|
||||
else
|
||||
|
||||
@@ -105,7 +105,7 @@ jobs:
|
||||
RELEASE_SUFFIX=""
|
||||
TAG_NAME="${{ github.ref_name }}"
|
||||
# Extract version from tag (remove v prefix if present)
|
||||
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
|
||||
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//' | tr '-' '~')
|
||||
|
||||
# Create spec file with tag version
|
||||
sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
|
||||
@@ -270,9 +270,13 @@ jobs:
|
||||
|
||||
# Determine the group based on ref type and branch
|
||||
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
|
||||
GROUP="stable"
|
||||
# For tags, extract the tag name for version info
|
||||
TAG_NAME="${{ github.ref_name }}"
|
||||
if [[ "$TAG_NAME" == *"-"* ]]; then
|
||||
GROUP="unstable"
|
||||
else
|
||||
GROUP="stable"
|
||||
fi
|
||||
elif [ "${{ github.ref_name }}" = "main" ]; then
|
||||
GROUP="dev"
|
||||
else
|
||||
|
||||
@@ -53,7 +53,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check for file changes
|
||||
uses: https://github.com/dorny/paths-filter@v4
|
||||
uses: https://github.com/dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
|
||||
@@ -199,6 +199,28 @@ jobs:
|
||||
registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
|
||||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
|
||||
release-binaries:
|
||||
name: "Release Binaries"
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build-release
|
||||
- build-maxperf
|
||||
permissions:
|
||||
contents: write
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
steps:
|
||||
- name: Download binary artifacts
|
||||
uses: forgejo/download-artifact@v4
|
||||
with:
|
||||
pattern: conduwuit*
|
||||
path: binaries
|
||||
merge-multiple: true
|
||||
- name: Create Release and Upload
|
||||
uses: https://github.com/softprops/action-gh-release@v2
|
||||
with:
|
||||
draft: true
|
||||
files: binaries/*
|
||||
|
||||
mirror_images:
|
||||
name: "Mirror Images"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -24,7 +24,7 @@ repos:
|
||||
- id: check-added-large-files
|
||||
|
||||
- repo: https://github.com/crate-ci/typos
|
||||
rev: v1.45.1
|
||||
rev: v1.46.1
|
||||
hooks:
|
||||
- id: typos
|
||||
- id: typos
|
||||
|
||||
@@ -1,3 +1,20 @@
|
||||
# Continuwuity 0.5.8 (2026-04-24)
|
||||
|
||||
## Features
|
||||
|
||||
- LDAP can now optionally be connected to using StartTLS, and you may unsafely skip verification. Contributed by @getz (#1389)
|
||||
- Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
- Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
|
||||
|
||||
## Improved Documentation
|
||||
|
||||
- Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera. (#1622)
|
||||
- Improve instructions for generic deployments, removing unnecessary parts and documenting the new initial registration token flow. Contributed by @stratself (#1677)
|
||||
|
||||
|
||||
# Continuwuity v0.5.7 (2026-04-17)
|
||||
|
||||
## Features
|
||||
|
||||
+1
-1
@@ -1 +1 @@
|
||||
Contributors are expected to follow the [Continuwuity Community Guidelines](continuwuity.org/community/guidelines).
|
||||
Contributors are expected to follow the [Continuwuity Community Guidelines](https://continuwuity.org/community/guidelines).
|
||||
|
||||
Generated
+284
-879
File diff suppressed because it is too large
Load Diff
+38
-46
@@ -12,7 +12,7 @@ license = "Apache-2.0"
|
||||
# See also `rust-toolchain.toml`
|
||||
readme = "README.md"
|
||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
version = "0.5.7"
|
||||
version = "0.5.9"
|
||||
|
||||
[workspace.metadata.crane]
|
||||
name = "conduwuit"
|
||||
@@ -39,7 +39,10 @@ features = ["ffi", "std", "union"]
|
||||
version = "1.1.0"
|
||||
|
||||
[workspace.dependencies.ctor]
|
||||
version = "0.10.0"
|
||||
version = "0.13.0"
|
||||
|
||||
[workspace.dependencies.dtor]
|
||||
version = "0.13.0"
|
||||
|
||||
[workspace.dependencies.cargo_toml]
|
||||
version = "0.22"
|
||||
@@ -68,7 +71,7 @@ default-features = false
|
||||
version = "0.1.3"
|
||||
|
||||
[workspace.dependencies.rand]
|
||||
version = "0.10.0"
|
||||
version = "0.10.1"
|
||||
|
||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||
[workspace.dependencies.bytes]
|
||||
@@ -161,7 +164,7 @@ features = ["raw_value"]
|
||||
|
||||
# Used for appservice registration files
|
||||
[workspace.dependencies.serde-saphyr]
|
||||
version = "0.0.24"
|
||||
version = "0.0.25"
|
||||
|
||||
# Used to load forbidden room/user regex from config
|
||||
[workspace.dependencies.serde_regex]
|
||||
@@ -177,7 +180,7 @@ version = "0.5.3"
|
||||
features = ["alloc", "rand"]
|
||||
default-features = false
|
||||
|
||||
# Used to generate thumbnails for images & blurhashes
|
||||
# Used to generate thumbnails for images
|
||||
[workspace.dependencies.image]
|
||||
version = "0.25.5"
|
||||
default-features = false
|
||||
@@ -188,14 +191,6 @@ features = [
|
||||
"webp",
|
||||
]
|
||||
|
||||
[workspace.dependencies.blurhash]
|
||||
version = "0.2.3"
|
||||
default-features = false
|
||||
features = [
|
||||
"fast-linear-to-srgb",
|
||||
"image",
|
||||
]
|
||||
|
||||
# logging
|
||||
[workspace.dependencies.log]
|
||||
version = "0.4.27"
|
||||
@@ -342,51 +337,49 @@ version = "0.1.88"
|
||||
[workspace.dependencies.lru-cache]
|
||||
version = "0.1.2"
|
||||
|
||||
[workspace.dependencies.assign]
|
||||
version = "1.1.1"
|
||||
|
||||
# Used for matrix spec type definitions and helpers
|
||||
[workspace.dependencies.ruma]
|
||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||
#branch = "conduwuit-changes"
|
||||
rev = "d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
|
||||
# version = "0.14.1"
|
||||
git = "https://github.com/ruma/ruma.git"
|
||||
rev = "9c9dccc93f054bbd28f23f630223fffa6289ecbc"
|
||||
features = [
|
||||
"compat",
|
||||
"rand",
|
||||
"appservice-api-c",
|
||||
"client-api",
|
||||
"federation-api",
|
||||
"markdown",
|
||||
"push-gateway-api-c",
|
||||
"unstable-exhaustive-types",
|
||||
"state-res",
|
||||
"rand",
|
||||
"markdown",
|
||||
"ring-compat",
|
||||
"compat-upload-signatures",
|
||||
"identifiers-validation",
|
||||
"unstable-unspecified",
|
||||
"unstable-msc2448",
|
||||
"compat-optional-txn-pdus",
|
||||
"unstable-msc2666",
|
||||
"unstable-msc2867",
|
||||
"unstable-msc2870",
|
||||
"unstable-msc3026",
|
||||
"unstable-msc3061",
|
||||
"unstable-msc3814",
|
||||
"unstable-msc3245",
|
||||
"unstable-msc3266",
|
||||
"unstable-msc3381", # polls
|
||||
"unstable-msc3489", # beacon / live location
|
||||
"unstable-msc3575",
|
||||
"unstable-msc3930", # polls push rules
|
||||
"unstable-msc3381",
|
||||
"unstable-msc3489",
|
||||
"unstable-msc3930",
|
||||
"unstable-msc4075",
|
||||
"unstable-msc4095",
|
||||
"unstable-msc4121",
|
||||
"unstable-msc4125",
|
||||
"unstable-msc4155",
|
||||
"unstable-msc4186",
|
||||
"unstable-msc4203", # sending to-device events to appservices
|
||||
"unstable-msc4210", # remove legacy mentions
|
||||
"unstable-msc4195",
|
||||
"unstable-msc4203",
|
||||
"unstable-msc4310",
|
||||
"unstable-msc4373",
|
||||
"unstable-msc4380",
|
||||
"unstable-msc4143",
|
||||
"unstable-msc4293",
|
||||
"unstable-msc4406",
|
||||
"unstable-msc4439",
|
||||
"unstable-extensible-events",
|
||||
"unstable-pdu",
|
||||
"unstable-msc4155",
|
||||
"unstable-msc4143", # livekit well_known response
|
||||
"unstable-msc4284",
|
||||
"unstable-msc4439", # pgp_key in .well_known/matrix/support
|
||||
]
|
||||
|
||||
[workspace.dependencies.rust-rocksdb]
|
||||
@@ -431,7 +424,7 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
|
||||
|
||||
# optional sentry metrics for crash/panic reporting
|
||||
[workspace.dependencies.sentry]
|
||||
version = "0.47.0"
|
||||
version = "0.48.0"
|
||||
default-features = false
|
||||
features = [
|
||||
"backtrace",
|
||||
@@ -446,9 +439,9 @@ features = [
|
||||
]
|
||||
|
||||
[workspace.dependencies.sentry-tracing]
|
||||
version = "0.47.0"
|
||||
version = "0.48.0"
|
||||
[workspace.dependencies.sentry-tower]
|
||||
version = "0.47.0"
|
||||
version = "0.48.0"
|
||||
|
||||
# jemalloc usage
|
||||
[workspace.dependencies.tikv-jemalloc-sys]
|
||||
@@ -547,16 +540,11 @@ features = ["std"]
|
||||
[workspace.dependencies.maplit]
|
||||
version = "1.0.2"
|
||||
|
||||
[workspace.dependencies.ldap3]
|
||||
version = "0.12.0"
|
||||
default-features = false
|
||||
features = ["sync", "tls-rustls", "rustls-provider"]
|
||||
|
||||
[workspace.dependencies.yansi]
|
||||
version = "1.0.1"
|
||||
|
||||
[workspace.dependencies.askama]
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
|
||||
[workspace.dependencies.lettre]
|
||||
version = "0.11.19"
|
||||
@@ -658,6 +646,10 @@ default-features = false
|
||||
package = "conduwuit"
|
||||
path = "src/main"
|
||||
|
||||
[workspace.dependencies.ruminuwuity]
|
||||
package = "ruminuwuity"
|
||||
path = "src/ruminuwuity"
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# Release profiles
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
|
||||
@@ -0,0 +1 @@
|
||||
The invite recipient's membership event is now included in invite stripped state, which should fix flaky invite display in some clients. Contributed by @ginger
|
||||
@@ -0,0 +1 @@
|
||||
Switched from Continuwuity's fork of Ruma back to upstream Ruma. Contributed by @ginger.
|
||||
@@ -0,0 +1 @@
|
||||
Removed support for guest user registration, a little-used and deprecated approach to room previews.
|
||||
@@ -0,0 +1 @@
|
||||
The deprecated `well_known.rtc_focus_server_urls` config option has been removed. MatrixRTC foci should be configured using the `matrix_rtc.foci` config option.
|
||||
@@ -1 +0,0 @@
|
||||
Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
|
||||
@@ -0,0 +1 @@
|
||||
Support for server-side blurhashing (part of MSC2448) has been removed.
|
||||
@@ -1 +0,0 @@
|
||||
LDAP can now optionally be connected to using StartTLS, and you may unsafely skip verification. Contributed by @getz
|
||||
@@ -0,0 +1 @@
|
||||
Add performance tuning documentation. Contributed by @stratself.
|
||||
@@ -1 +0,0 @@
|
||||
Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera.
|
||||
@@ -1 +0,0 @@
|
||||
Improve instructions for generic deployments, removing unnecessary parts and documenting the new initial registration token flow. Contributed by @stratself
|
||||
@@ -0,0 +1 @@
|
||||
Removed support for LDAP.
|
||||
@@ -0,0 +1 @@
|
||||
Clarified in the config that `max_request_size` affects federated media as well.
|
||||
@@ -0,0 +1 @@
|
||||
Added support for fallback encryption keys.
|
||||
@@ -0,0 +1 @@
|
||||
Fixed a bug that caused the server to drop events during processing if several events for the same room were sent in a singular transaction. Contributed by @nex.
|
||||
@@ -0,0 +1 @@
|
||||
Add `!admin users reject-all-invites` to clean invite spam
|
||||
@@ -0,0 +1 @@
|
||||
fix `!admin query account-data account-data-get` not returning the content
|
||||
@@ -0,0 +1 @@
|
||||
Fixed an issue where Continuwuity would only advertise support for the unstable endpoint for Mutual Rooms (MSC2666), despite only supporting the stable endpoint. Contributed by @Henry-Hiles (QuadRadical)
|
||||
@@ -7,7 +7,6 @@
|
||||
[global]
|
||||
address = "0.0.0.0"
|
||||
allow_device_name_federation = true
|
||||
allow_guest_registration = true
|
||||
allow_public_room_directory_over_federation = true
|
||||
allow_registration = true
|
||||
database_path = "/database"
|
||||
@@ -32,7 +31,6 @@ rocksdb_log_level = "info"
|
||||
rocksdb_max_log_files = 1
|
||||
rocksdb_recovery_mode = 0
|
||||
rocksdb_paranoid_file_checks = true
|
||||
log_guest_registrations = false
|
||||
allow_legacy_media = true
|
||||
startup_netburst = true
|
||||
startup_netburst_keep = -1
|
||||
|
||||
+1
-151
@@ -291,6 +291,7 @@
|
||||
#ip_lookup_strategy = 5
|
||||
|
||||
# Max request size for file uploads in bytes. Defaults to 20MB.
|
||||
# Also limits incoming federated media.
|
||||
#
|
||||
#max_request_size = 20971520
|
||||
|
||||
@@ -573,18 +574,6 @@
|
||||
#
|
||||
#allow_public_room_directory_over_federation = false
|
||||
|
||||
# Allow guests/unauthenticated users to access TURN credentials.
|
||||
#
|
||||
# This is the equivalent of Synapse's `turn_allow_guests` config option.
|
||||
# This allows any unauthenticated user to call the endpoint
|
||||
# `/_matrix/client/v3/voip/turnServer`.
|
||||
#
|
||||
# It is unlikely you need to enable this as all major clients support
|
||||
# authentication for this endpoint and prevents misuse of your TURN server
|
||||
# from potential bots.
|
||||
#
|
||||
#turn_allow_guests = false
|
||||
|
||||
# Set this to true to lock down your server's public room directory and
|
||||
# only allow admins to publish rooms to the room directory. Unpublishing
|
||||
# is still allowed by all users with this enabled.
|
||||
@@ -1282,21 +1271,6 @@
|
||||
#
|
||||
#brotli_compression = false
|
||||
|
||||
# Set to true to allow user type "guest" registrations. Some clients like
|
||||
# Element attempt to register guest users automatically.
|
||||
#
|
||||
#allow_guest_registration = false
|
||||
|
||||
# Set to true to log guest registrations in the admin room. Note that
|
||||
# these may be noisy or unnecessary if you're a public homeserver.
|
||||
#
|
||||
#log_guest_registrations = false
|
||||
|
||||
# Set to true to allow guest registrations/users to auto join any rooms
|
||||
# specified in `auto_join_rooms`.
|
||||
#
|
||||
#allow_guests_auto_join_rooms = false
|
||||
|
||||
# Enable the legacy unauthenticated Matrix media repository endpoints.
|
||||
# These endpoints consist of:
|
||||
# - /_matrix/media/*/config
|
||||
@@ -1900,34 +1874,6 @@
|
||||
#
|
||||
#support_pgp_key =
|
||||
|
||||
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
|
||||
#
|
||||
# A list of MatrixRTC foci URLs which will be served as part of the
|
||||
# MSC4143 client endpoint at /.well-known/matrix/client.
|
||||
#
|
||||
# This option is deprecated and will be removed in a future release.
|
||||
# Please migrate to the new `[global.matrix_rtc]` config section.
|
||||
#
|
||||
#rtc_focus_server_urls = []
|
||||
|
||||
[global.blurhashing]
|
||||
|
||||
# blurhashing x component, 4 is recommended by https://blurha.sh/
|
||||
#
|
||||
#components_x = 4
|
||||
|
||||
# blurhashing y component, 3 is recommended by https://blurha.sh/
|
||||
#
|
||||
#components_y = 3
|
||||
|
||||
# Max raw size that the server will blurhash, this is the size of the
|
||||
# image after converting it to raw data, it should be higher than the
|
||||
# upload limit but not too high. The higher it is the higher the
|
||||
# potential load will be for clients requesting blurhashes. The default
|
||||
# is 33.55MB. Setting it to 0 disables blurhashing.
|
||||
#
|
||||
#blurhash_max_raw_size = 33554432
|
||||
|
||||
[global.matrix_rtc]
|
||||
|
||||
# A list of MatrixRTC foci (transports) which will be served via the
|
||||
@@ -1945,102 +1891,6 @@
|
||||
#
|
||||
#foci = []
|
||||
|
||||
[global.ldap]
|
||||
|
||||
# Whether to enable LDAP login.
|
||||
#
|
||||
# example: "true"
|
||||
#
|
||||
#enable = false
|
||||
|
||||
# Whether to force LDAP authentication or authorize classical password
|
||||
# login.
|
||||
#
|
||||
# example: "true"
|
||||
#
|
||||
#ldap_only = false
|
||||
|
||||
# URI of the LDAP server.
|
||||
#
|
||||
# example: "ldap://ldap.example.com:389"
|
||||
#
|
||||
#uri = ""
|
||||
|
||||
# StartTLS for LDAP connections.
|
||||
#
|
||||
#use_starttls = false
|
||||
|
||||
# Skip TLS certificate verification, possibly dangerous.
|
||||
#
|
||||
#disable_tls_verification = false
|
||||
|
||||
# Root of the searches.
|
||||
#
|
||||
# example: "ou=users,dc=example,dc=org"
|
||||
#
|
||||
#base_dn = ""
|
||||
|
||||
# Bind DN if anonymous search is not enabled.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username. In such case, the password used to bind will be the
|
||||
# one provided for the login and not the one given by
|
||||
# `bind_password_file`. Beware: automatically granting admin rights will
|
||||
# not work if you use this direct bind instead of a LDAP search.
|
||||
#
|
||||
# example: "cn=ldap-reader,dc=example,dc=org" or
|
||||
# "cn={username},ou=users,dc=example,dc=org"
|
||||
#
|
||||
#bind_dn = ""
|
||||
|
||||
# Path to a file on the system that contains the password for the
|
||||
# `bind_dn`.
|
||||
#
|
||||
# The server must be able to access the file, and it must not be empty.
|
||||
#
|
||||
#bind_password_file = ""
|
||||
|
||||
# Search filter to limit user searches.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username for more complex filters.
|
||||
#
|
||||
# example: "(&(objectClass=person)(memberOf=matrix))"
|
||||
#
|
||||
#filter = "(objectClass=*)"
|
||||
|
||||
# Attribute to use to uniquely identify the user.
|
||||
#
|
||||
# example: "uid" or "cn"
|
||||
#
|
||||
#uid_attribute = "uid"
|
||||
|
||||
# Attribute containing the display name of the user.
|
||||
#
|
||||
# example: "givenName" or "sn"
|
||||
#
|
||||
#name_attribute = "givenName"
|
||||
|
||||
# Root of the searches for admin users.
|
||||
#
|
||||
# Defaults to `base_dn` if empty.
|
||||
#
|
||||
# example: "ou=admins,dc=example,dc=org"
|
||||
#
|
||||
#admin_base_dn = ""
|
||||
|
||||
# The LDAP search filter to find administrative users for continuwuity.
|
||||
#
|
||||
# If left blank, administrative state must be configured manually for each
|
||||
# user.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username for more complex filters.
|
||||
#
|
||||
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
|
||||
#
|
||||
#admin_filter = ""
|
||||
|
||||
#[global.antispam]
|
||||
|
||||
#[global.antispam.meowlnir]
|
||||
|
||||
+3
-1
@@ -17,12 +17,14 @@ ARG LLVM_VERSION=21
|
||||
# Line one: compiler tools
|
||||
# Line two: curl, for downloading binaries and wget because llvm.sh is broken with curl
|
||||
# Line three: for xx-verify
|
||||
# golang, cmake: For aws-lc-rs bindgen
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update && apt-get install -y \
|
||||
pkg-config make jq \
|
||||
wget curl git software-properties-common \
|
||||
file
|
||||
# golang cmake
|
||||
|
||||
# LLVM packages
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
@@ -48,7 +50,7 @@ EOF
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.18.1
|
||||
ENV BINSTALL_VERSION=1.19.1
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
||||
@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.18.1
|
||||
ENV BINSTALL_VERSION=1.19.1
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
||||
@@ -8,6 +8,11 @@
|
||||
"type": "file",
|
||||
"name": "dns",
|
||||
"label": "DNS tuning (recommended)"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "performance",
|
||||
"label": "Performance tuning"
|
||||
}
|
||||
|
||||
]
|
||||
|
||||
@@ -156,9 +156,11 @@ ### Serving well-known files manually
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Check with the [Matrix Connectivity Tester][federation-tester] to see that it's working.
|
||||
Check that other servers can connect to you.
|
||||
Here are some tools that can help identify federation issues:
|
||||
|
||||
[federation-tester]: https://federationtester.mtrnord.blog/
|
||||
- [Matrix Connectivity Tester](https://federationtester.mtrnord.blog/)
|
||||
- [Matrix Federation Tester](https://federationtester.matrix.org/)
|
||||
|
||||
### Cannot log in with web clients
|
||||
|
||||
|
||||
@@ -0,0 +1,135 @@
|
||||
# Performance tuning
|
||||
|
||||
Continuwuity's default configs are suited for many typical setups and scale appropriately with the size of your hardware. However, there are many scenarios where additional modifications can be made to better utilize your server resources.
|
||||
|
||||
This page aims to outline various performance tweaks for Continuwuity and their effects. These adjustments are especially helpful for homeservers that join many large federated rooms or have many users, and it will become increasingly necessary as the Matrix network expands. As always, your mileage may vary according to your setup's specifics. If you have further discussions or recommendations, please share them in the community rooms.
|
||||
|
||||
## DNS tuning (recommended)
|
||||
|
||||
Please see the dedicated [DNS tuning guide](./dns.mdx).
|
||||
|
||||
## Cache capacities
|
||||
|
||||
If you have memory to spare, consider increasing the `cache_capacity_modifier` value to a larger number to allow more data to be stored in hot memory. This *significantly* speeds up many intensive operations (such as state resolutions) and decreases CPU usage and disk I/O. Start with a baseline of `cache_capacity_modifier = 2.0` and tune up until you are satisfied with RAM usage.
|
||||
|
||||
On the other hand, if your system doesn't have a lot of RAM, consider decreasing the cache capacity modifier to something smaller than `1.0` to avoid low-memory issues (at the cost of higher load on disk/CPU). This recommendation also works if your system has abnormally little RAM compared to the number of CPU cores (for example, 2GB RAM for 12 cores), as cache capacities scale according to the number of available cores.
|
||||
|
||||
## Disabling some features
|
||||
|
||||
You can disable outgoing **typing notifications** and **read markers** to reduce strain on the CPU and network when actively participating in rooms.
|
||||
|
||||
```toml
|
||||
# disables sending read receipts
|
||||
allow_outgoing_read_receipts = false
|
||||
# disables sending typing notifications
|
||||
allow_outgoing_typing = false
|
||||
```
|
||||
|
||||
Outgoing presence updates are also considered very expensive and have been disabled by default (`allow_outgoing_presence = false`). For more savings, you may wish to disable _all_ processing of presence entirely.
|
||||
|
||||
```toml title=continuwuity.toml
|
||||
# disabling presence updates entirely
|
||||
allow_local_presence = false
|
||||
allow_incoming_presence = false
|
||||
allow_outgoing_presence = false
|
||||
```
|
||||
|
||||
## Tuning database compression
|
||||
|
||||
:::warning
|
||||
These steps SHOULD be done **before** starting Continuwuity for the first time. While switching database compression midway through is theoretically possible, this has not been tested extensively in the wild.
|
||||
:::
|
||||
|
||||
### Changing the compression algorithm
|
||||
|
||||
For reduced CPU usage at a tradeoff of increased storage space, consider deploying Continuwuity with the faster and less intensive `lz4` algorithm instead of `zstd` for rocksdb, and disable WAL compression entirely:
|
||||
|
||||
```toml
|
||||
### in continuwuity.toml ###
|
||||
rocksdb_compression_algo = "lz4"
|
||||
rocksdb_wal_compression = "none"
|
||||
```
|
||||
|
||||
This tweak can be especially helpful if you have an older or less performant CPU (e.g. a Raspberry Pi) and disk space to spare.
|
||||
|
||||
### Increasing bottommost layer compression (`zstd` only)
|
||||
|
||||
The bottommost layer of the database usually contains old and read-only data, so it is a suitable place for further compression. In Continuwuity, this is possible by setting `rocksdb_bottommost_compression = true` and tuning `rocksdb_bottommost_compression_level` to a more compact level than the default one used in `rocksdb_compression_level`. This tweak comes at a cost of increased CPU usage, but may prevent your database from growing too large in the long run.
|
||||
|
||||
For those using `zstd` compression, the compression level ranges from 1 to 22. An example like this could apply:
|
||||
|
||||
```toml
|
||||
### in continuwuity.toml ###
|
||||
rocksdb_compression_algo = "zstd"
|
||||
rocksdb_compression_level = 32767 # magic number, translates to level 3 on zstd
|
||||
rocksdb_bottommost_compression = true
|
||||
rocksdb_bottommost_compression_level = 9 # level 9 on zstd
|
||||
```
|
||||
|
||||
For `lz4` users, the default level (`-1`) is already the most compact. You can only further decrease it to favor compression speed over ratio.
|
||||
|
||||
Consult these documents for more information on compression tuning and levels:
|
||||
|
||||
- [Rocksdb compression documentation][rocksdb-compression]
|
||||
- [Rocksdb default compression levels][rocksdb-compression-defaults]
|
||||
- [Zstd manual][zstd-manual]
|
||||
- [Lz4 manual][lz4-manual]
|
||||
|
||||
[rocksdb-compression]: https://github.com/facebook/rocksdb/wiki/Compression
|
||||
[rocksdb-compression-defaults]: https://github.com/facebook/rocksdb/blob/main/include/rocksdb/options.h#L208-L217
|
||||
[zstd-manual]: https://facebook.github.io/zstd/zstd_manual.html
|
||||
[lz4-manual]: https://github.com/lz4/lz4/blob/release/doc/lz4_manual.html
|
||||
|
||||
## Other tweaks
|
||||
|
||||
### Using UNIX sockets
|
||||
|
||||
If your homeserver and reverse proxy live on the same machine, you may wish to expose Continuwuity on a UNIX socket instead of a port. This removes TCP overhead between the two programs.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Example config with Caddy</summary>
|
||||
|
||||
```toml
|
||||
### in continuwuity.toml ###
|
||||
|
||||
# `address` and `port` has to be commented out first
|
||||
#address = ["127.0.0.1", "::1"]
|
||||
#port = 8008
|
||||
unix_socket_path = "/run/continuwuity/continuwuity.sock"
|
||||
```
|
||||
|
||||
```
|
||||
### in your Caddyfile ###
|
||||
https://matrix.example.com {
|
||||
reverse_proxy unix//run/continuwuity/continuwuity.sock
|
||||
|
||||
# alternatively, use the http2-plaintext protocol
|
||||
# reverse_proxy unix+h2c//run/continuwuity/continuwuity.sock
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Tuning your trusted servers
|
||||
|
||||
:::info Vet your trusted servers!
|
||||
Trusted servers are your first point of contact when obtaining public keys from other servers, and they could theoretically impersonate other servers and cause significant harm to your deployment. Please thoroughly verify your trusted servers' credibility before adding them to your configuration.
|
||||
:::
|
||||
|
||||
Trusted servers are queried sequentially in the order they are listed. If you have multiple trusted servers configured, put the faster ones first:
|
||||
|
||||
```toml
|
||||
# Example config, using maintainers' recommended homeservers
|
||||
trusted_servers = ["codestorm.net","starstruck.systems","unredacted.org","matrix.org"]
|
||||
```
|
||||
|
||||
Avoid prioritising `matrix.org` as your primary trusted server, as it tends to be quite slow.
|
||||
|
||||
Some users have also reported that increasing `trusted_server_batch_size` has helped with faster joins for huge rooms. Start with doubling the default to `2048` until you find a suitable value.
|
||||
|
||||
### Enable HTTP/3 on your reverse proxy
|
||||
|
||||
Consider enabling the newer **HTTP/3** protocol for inbound connections to Continuwuity. In Caddy, HTTP/3 is enabled by default, but you must expose port :443/**udp** on your firewall.
|
||||
|
||||
HTTP/3 can vastly improve Client-Server connections especially on unstable networks, as it reduces packet losses and latency from TCP head-of-line blocking, includes workarounds for network switching, and reduces connection establishment handshakes. Continuwuity also includes experimental _outbound_ HTTP/3 support in its Docker images, so connections between Continuwuity servers can benefit from this too.
|
||||
@@ -268,9 +268,13 @@ ## Starting Your Server
|
||||
|
||||
## How do I know it works?
|
||||
|
||||
To check if your server can communicate with other homeservers, use the
|
||||
[Matrix Federation Tester](https://federationtester.mtrnord.blog/). If you can
|
||||
register your account but cannot join federated rooms, check your configuration
|
||||
To check if your server can communicate with other homeservers,
|
||||
use an external testing tool:
|
||||
|
||||
- [Matrix Connectivity Tester](https://federationtester.mtrnord.blog/)
|
||||
- [Matrix Federation Tester](https://federationtester.matrix.org/)
|
||||
|
||||
If you can register your account but cannot join federated rooms, check your configuration
|
||||
and verify that your federation endpoints are opened and forwarded correctly.
|
||||
|
||||
As a quick health check, you can also use these cURL commands:
|
||||
|
||||
@@ -81,8 +81,6 @@ ## List of forked dependencies
|
||||
All forked dependencies are maintained under the
|
||||
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various
|
||||
performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
|
||||
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"mention_room": false,
|
||||
"date": "2026-04-17",
|
||||
"message": "[v0.5.7](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.7) is out! Email verification! Terms and Conditions! Deleting notification pushers! So much good stuff. Go grab the release and read the changelog!"
|
||||
"id": 13,
|
||||
"mention_room": true,
|
||||
"date": "2026-05-08",
|
||||
"message": "[v0.5.9](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.9) has been released, fixing a few low-severity federation-related vulnerabilities. It is recommended you read the changelog and update as soon as possible. There are no new features or other changes in this release, only related bugfixes. Deployments tracking the main branch should also update to the latest commit."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ ## Running commands
|
||||
|
||||
* All commands listed here may be used by server administrators in the admin room by sending them as messages.
|
||||
* If the `admin_escape_commands` configuration option is enabled, server administrators may run certain commands in public rooms by prefixing them with a single backslash. These commands will only run on _their_ homeserver, even if they are a member of another homeserver's admin room. Some sensitive commands cannot be used outside the admin room and will return an error.
|
||||
* All commands listed here may be used in the server's console, if it is enabled. Commands entered in the console do not require the `!admin` prefix. If Continuwuity is deployed via Docker, be sure to set the appropriate options detailed in [the Docker deployment guide](../../deploying/docker.mdx#accessing-the-servers-console) to enable access to the server's console.
|
||||
* All commands listed here may be used in the server's console, if it is enabled. Commands entered in the console do not require the `!admin` prefix.
|
||||
|
||||
## Categories
|
||||
|
||||
|
||||
+1
-1
@@ -146,7 +146,7 @@ cargo clippy \
|
||||
--locked \
|
||||
--profile test \
|
||||
--no-default-features \
|
||||
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \
|
||||
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression \
|
||||
--color=always \
|
||||
-- \
|
||||
-D warnings
|
||||
|
||||
Generated
+15
-15
@@ -3,11 +3,11 @@
|
||||
"advisory-db": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1775907537,
|
||||
"narHash": "sha256-vbeLNgmsx1Z6TwnlDV0dKyeBCcon3UpkV9yLr/yc6HM=",
|
||||
"lastModified": 1777645914,
|
||||
"narHash": "sha256-P1T7QVQS13OvkXEuEhI91CLaQfyv6iqV9vW8IBLLDYg=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "d99f7b9eb81731bddebf80a355f8be7b2f8b1b28",
|
||||
"rev": "d6ba1f7070ba91f45efe372d68eb648be67d0417",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -18,11 +18,11 @@
|
||||
},
|
||||
"crane": {
|
||||
"locked": {
|
||||
"lastModified": 1775839657,
|
||||
"narHash": "sha256-SPm9ck7jh3Un9nwPuMGbRU04UroFmOHjLP56T10MOeM=",
|
||||
"lastModified": 1777335812,
|
||||
"narHash": "sha256-bEg5xoAxAwsyfnGhkEX7RJViTIBIYPd8ISg4O1c0HFc=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "7cf72d978629469c4bd4206b95c402514c1f6000",
|
||||
"rev": "5e0fb2f64edff2822249f21293b8304dedaaf676",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -39,11 +39,11 @@
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1775891769,
|
||||
"narHash": "sha256-EOfVlTKw2n8w1uhfh46GS4hEGnQ7oWrIWQfIY6utIkI=",
|
||||
"lastModified": 1777624102,
|
||||
"narHash": "sha256-thSyElkje577x/kAbP72nHlfiFc1a+tCudskLPHXe9s=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "6fbc54dde15aee725bdc7aae5e478849685d5f56",
|
||||
"rev": "4d81601e0b73f20d81d066754ad0e7d1e7f75a06",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -89,11 +89,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1775710090,
|
||||
"narHash": "sha256-ar3rofg+awPB8QXDaFJhJ2jJhu+KqN/PRCXeyuXR76E=",
|
||||
"lastModified": 1777268161,
|
||||
"narHash": "sha256-bxrdOn8SCOv8tN4JbTF/TXq7kjo9ag4M+C8yzzIRYbE=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "4c1018dae018162ec878d42fec712642d214fdfa",
|
||||
"rev": "1c3fe55ad329cbcb28471bb30f05c9827f724c76",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -132,11 +132,11 @@
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1775843361,
|
||||
"narHash": "sha256-j53ZgyDvmYf3Sjh1IPvvTjqa614qUfVQSzj59+MpzkY=",
|
||||
"lastModified": 1777583169,
|
||||
"narHash": "sha256-dVJ4+wrRKc8oIgp3rLOFSq1obt/sCKlXy3h47qof/w0=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "9eb97ea96d8400e8957ddd56702e962614296583",
|
||||
"rev": "aa64e4828a2bbba44463c1229a81c748d3cce583",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
Generated
+130
-151
@@ -16,26 +16,24 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@emnapi/core": {
|
||||
"version": "1.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz",
|
||||
"integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==",
|
||||
"version": "1.10.0",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz",
|
||||
"integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@emnapi/wasi-threads": "1.2.1",
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@emnapi/runtime": {
|
||||
"version": "1.9.2",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz",
|
||||
"integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==",
|
||||
"version": "1.10.0",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz",
|
||||
"integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
@@ -47,7 +45,6 @@
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
@@ -109,9 +106,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/wasm-runtime": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.2.tgz",
|
||||
"integrity": "sha512-sNXv5oLJ7ob93xkZ1XnxisYhGYXfaG9f65/ZgYuAu3qt7b3NadcOEhLvx28hv31PgX8SZJRYrAIPQilQmFpLVw==",
|
||||
"version": "1.1.4",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.4.tgz",
|
||||
"integrity": "sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
@@ -128,13 +125,13 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@rsbuild/core": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-eqxtRlQiFSm/ibCNGiPj8ozsGSNK91NY+GksmPuTCPmWQExGtPqM1V+s13UYeWZS6fYbMRs7NlQKD896e0QkKA==",
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.5.tgz",
|
||||
"integrity": "sha512-KajO50hbXb32S8MsyDh2f+xKcVeRy9Gfzdcy0JjpMLj22djHugly6jrGo7jH7ls9X6/TDcyCTncSuNK4+D2lTw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@rspack/core": "2.0.0-rc.1",
|
||||
"@rspack/core": "~2.0.2",
|
||||
"@swc/helpers": "^0.5.21"
|
||||
},
|
||||
"bin": {
|
||||
@@ -153,17 +150,17 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@rsbuild/plugin-react": {
|
||||
"version": "1.4.6",
|
||||
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-1.4.6.tgz",
|
||||
"integrity": "sha512-LAT6xHlEyZKA0VjF/ph5d50iyG+WSmBx+7g98HNZUwb94VeeTMZFB8qVptTkbIRMss3BNKOXmHOu71Lhsh9oEw==",
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-2.0.0.tgz",
|
||||
"integrity": "sha512-/1gzt39EGUSFEqB83g46QoOwsgv172HI18i6au1b6lgIaX4sv9stuX4ijdHbHCp8PqYEq+MyQ99jIQMO6I+etg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@rspack/plugin-react-refresh": "^1.6.1",
|
||||
"@rspack/plugin-react-refresh": "2.0.0",
|
||||
"react-refresh": "^0.18.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@rsbuild/core": "^1.0.0 || ^2.0.0-0"
|
||||
"@rsbuild/core": "^2.0.0-0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@rsbuild/core": {
|
||||
@@ -172,28 +169,28 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@rspack/binding": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-rhJqtbyiRPOjTAZW0xTZFbOrS5yP5yL1SF0DPE9kvFfzePz30IqjMDMxL0KuhkDZd/M1eUINJyoqd8NTbR9wHw==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.2.tgz",
|
||||
"integrity": "sha512-0kZPplW9GWx8mfC6DfsaRY3QBIYPuUs42JfmSM6aSb8tMHZAXQeLeMB8M+h8i4SeI+aFtCgO6UuYGtyWf7+L+A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optionalDependencies": {
|
||||
"@rspack/binding-darwin-arm64": "2.0.0-rc.1",
|
||||
"@rspack/binding-darwin-x64": "2.0.0-rc.1",
|
||||
"@rspack/binding-linux-arm64-gnu": "2.0.0-rc.1",
|
||||
"@rspack/binding-linux-arm64-musl": "2.0.0-rc.1",
|
||||
"@rspack/binding-linux-x64-gnu": "2.0.0-rc.1",
|
||||
"@rspack/binding-linux-x64-musl": "2.0.0-rc.1",
|
||||
"@rspack/binding-wasm32-wasi": "2.0.0-rc.1",
|
||||
"@rspack/binding-win32-arm64-msvc": "2.0.0-rc.1",
|
||||
"@rspack/binding-win32-ia32-msvc": "2.0.0-rc.1",
|
||||
"@rspack/binding-win32-x64-msvc": "2.0.0-rc.1"
|
||||
"@rspack/binding-darwin-arm64": "2.0.2",
|
||||
"@rspack/binding-darwin-x64": "2.0.2",
|
||||
"@rspack/binding-linux-arm64-gnu": "2.0.2",
|
||||
"@rspack/binding-linux-arm64-musl": "2.0.2",
|
||||
"@rspack/binding-linux-x64-gnu": "2.0.2",
|
||||
"@rspack/binding-linux-x64-musl": "2.0.2",
|
||||
"@rspack/binding-wasm32-wasi": "2.0.2",
|
||||
"@rspack/binding-win32-arm64-msvc": "2.0.2",
|
||||
"@rspack/binding-win32-ia32-msvc": "2.0.2",
|
||||
"@rspack/binding-win32-x64-msvc": "2.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@rspack/binding-darwin-arm64": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-fYbeDDDg6QKZzXYt/J0/j0Qhr01wQLuISUsYnNhu5MLwdXVUSVcqz+CTqgF3d0EQVVn6FqLV63lbNRzUGfSq9g==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.2.tgz",
|
||||
"integrity": "sha512-0o7lbgBBsDlICWdjIH0q3e0BsSco4GRiImHWVfZSVEG+q2+ykZJvSvYCVhPM1Co375Z0S3VMPa/8SjcY1FHwlw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -205,9 +202,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-darwin-x64": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-MvXi9kr8xXn1y0PD1WI/4YphRNOdbykJjKdEsAG4JxEVoERmhIHOTwKvUqlejajizAwlVZcxQl/FacoPLsKN5Q==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.2.tgz",
|
||||
"integrity": "sha512-tOwxZpoPlTlRs/w6UyUinXJ4TYRVHMlR7+eQxO1R3muKpixvhXQjtvoaY16HuFyTVky5F0IfOoWr3x9FEsgdLg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -219,9 +216,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-linux-arm64-gnu": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-j6WsHEwGSdUoiy4BsQBW0RjFl+MBzozdybSYhkiyVSoHlbm7CPt3XaaS3elH5YcwuLHORmVHPP91QhwWl9UFJg==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.2.tgz",
|
||||
"integrity": "sha512-1ZD4YFhG1rmgqj+W8hfwHyKV8xDxGsc/3KgU0FwmiVEX7JfzhCkgBO/xlCG79kRKSrzuVzt4icO/G3cCKn0pag==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -236,9 +233,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-linux-arm64-musl": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-MPoZE0aS8oH+Wr0R5tIYch8gbUwYYf4LsiGdP6enMKMTrmpJyOVGlhPHVSwsrFgBg7fjTGOuxHuibtsvDUdLOQ==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.2.tgz",
|
||||
"integrity": "sha512-/PtTkM/DsDLjeuXTmeJeRfbjCDbcL9jvoVgZrgxYFZ28y2cdLvbChbW9uigOzs5dQEs1CIBQXMTTj7KhdBTuQg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -253,9 +250,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-linux-x64-gnu": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-gOlPCwtIg9GsFG/8ZdUyV5SyXDaGq2kmtXmyyFU7RO33MaalltNEBMf2hevRPj9z39eSzxwgJDonMOdx5Fo0Og==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.2.tgz",
|
||||
"integrity": "sha512-bBjsZxMHRaPo6X9SokApm6ucs+UhXtAJFyJJyuk2BH4XJsLeCU9Dz1vMwioeohFbJUUeTASVPm6/BL+RhSaunw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -270,9 +267,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-linux-x64-musl": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-K6Swk1rfP4z4b6bp84NlikGlUWMOPpIWCtlPr/W0TWgc2C/cd844oHdoIu7WtmOH7y9AwB5UG2bWpgFAVwykCw==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.2.tgz",
|
||||
"integrity": "sha512-HjlpInqzabDNkhVsUJpsHPqa9QYVWBViJoyWNjzXCAW0vKMDvwaphyUvokSinX8FGTlZi/sr5UEaHJo6XtQ35g==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -287,9 +284,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-wasm32-wasi": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-aa9oUTqOb1QjwsHVlMr5sV+7mcBI4MLQ/xhFO2CIEcfVnJIPl8XpKUbDEgqMwcFlzcgzKmHg5cVmIvd82BLgow==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.2.tgz",
|
||||
"integrity": "sha512-YaRYNFLJRpkGfYjSWR7n9f+nQKtrlmrrffpAn/blc2geHcRvXoBc5SCs1idPtsLhj7H9qWWhs7ucjyHy4csWFg==",
|
||||
"cpu": [
|
||||
"wasm32"
|
||||
],
|
||||
@@ -297,13 +294,15 @@
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@napi-rs/wasm-runtime": "1.1.2"
|
||||
"@emnapi/core": "1.10.0",
|
||||
"@emnapi/runtime": "1.10.0",
|
||||
"@napi-rs/wasm-runtime": "1.1.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@rspack/binding-win32-arm64-msvc": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-+UxF0c7E9bE3siFbMHi+mmoeQJzcTKl1j3x+Y6MY/PJ3V70cU23wOaxMvmSsCyq2JNJBT2RCNZ9HaL+o3kReug==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.2.tgz",
|
||||
"integrity": "sha512-d/3kTEKq+asLjRFPO96t+wfWiM7DLN76VQEPDD9bc1kdsZXlVJBuvyXfsgK8bbEvKplWXYcSsokhmEnuXrLOpg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -315,9 +314,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-win32-ia32-msvc": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-gc0JdkdxSWo+o/b1qTCT6mZ3DrlGe32eW+Ps3xInxcG4UHjUG7hTDgFtOgVQ6VhQ8WMUXG+TQOz0CySVpYjsoQ==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.2.tgz",
|
||||
"integrity": "sha512-161cWineq3RW+Jdm1FAfSpXeUtYWvhB3kAbm46vNT9h/YYz+spwsFMvveAZ1nsVSVL0IC5lDBGUte7yUAY8K2g==",
|
||||
"cpu": [
|
||||
"ia32"
|
||||
],
|
||||
@@ -329,9 +328,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/binding-win32-x64-msvc": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-Dnj0jthyVUikf65MGEyZy3akshtSmR1xsp/Xr0h/NWTo5JFWHKAFNYFE+jFfY0uzC8e4IDcLQLYoFomqV1DsEg==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.2.tgz",
|
||||
"integrity": "sha512-y7Q0S1FE+OlkL5GMqLG0PwxrPw6E1r892KhGrGKE1Vdufe5YTEx6xTPxzZ+b7N2KPD7s9G1/iJmWHQxb1+Bjkg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -343,13 +342,13 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rspack/core": {
|
||||
"version": "2.0.0-rc.1",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.0-rc.1.tgz",
|
||||
"integrity": "sha512-OIfkYn05/IWtVIdZ8Y/a0y/k4ipzqfApxIZqnJM59G/bGwQKMBrLHpOMGgV2Wmq1j9UMXzF7ZtsFMUbYBhFb9A==",
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.2.tgz",
|
||||
"integrity": "sha512-VM3UHOo26uC+4QSqY5tU1ybI7KuXY5rTof8nhFOaBY9SYau0Smvr+hMSAPmrmHwknB6dXT8yaNVxrj7I+qxE1Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@rspack/binding": "2.0.0-rc.1"
|
||||
"@rspack/binding": "2.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^20.19.0 || >=22.12.0"
|
||||
@@ -368,39 +367,36 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@rspack/plugin-react-refresh": {
|
||||
"version": "1.6.2",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-1.6.2.tgz",
|
||||
"integrity": "sha512-k+/VrfTNgo+KirjI6V+8CWRj6y+DH9jOUWv8JorYY4vKf/9xfnZ8xHzuB4iqCwTtoZl9YnxOaOuoyjJipc2tiQ==",
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-2.0.0.tgz",
|
||||
"integrity": "sha512-Cf6CxBStNDJbiXMc/GmsvG1G8PRlUpa0MSfWsMTI+e8npzuTN/p8nwLs3shriBZOLciqgkSZpBtPTd10BLpj1g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"error-stack-parser": "^2.1.4"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react-refresh": ">=0.10.0 <1.0.0",
|
||||
"webpack-hot-middleware": "2.x"
|
||||
"@rspack/core": "^2.0.0-0",
|
||||
"react-refresh": ">=0.10.0 <1.0.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"webpack-hot-middleware": {
|
||||
"@rspack/core": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@rspress/core": {
|
||||
"version": "2.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.9.tgz",
|
||||
"integrity": "sha512-cfbqqbWtdimrWIsfeyPnQOTKwJpdNLr8VnwLIL4JYC2ZcRq+xcInpszLXVpV86nONL6qI19usr2Or7uzZJ+ynA==",
|
||||
"version": "2.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.11.tgz",
|
||||
"integrity": "sha512-4YBOFmSMFv5GWrCa80qSIW8VxqZQQS/PknVq2r7Hb7kgfB38Fzciopn3hjb3hNwI4TTRbsi/Jev2HyRWD4bYAQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@mdx-js/mdx": "^3.1.1",
|
||||
"@mdx-js/react": "^3.1.1",
|
||||
"@rsbuild/core": "2.0.0-rc.1",
|
||||
"@rsbuild/plugin-react": "~1.4.6",
|
||||
"@rspress/shared": "2.0.9",
|
||||
"@rsbuild/core": "^2.0.5",
|
||||
"@rsbuild/plugin-react": "~2.0.0",
|
||||
"@rspress/shared": "2.0.11",
|
||||
"@shikijs/rehype": "^4.0.2",
|
||||
"@types/unist": "^3.0.3",
|
||||
"@unhead/react": "^2.1.13",
|
||||
"@unhead/react": "^2.1.15",
|
||||
"body-scroll-lock": "4.0.0-beta.0",
|
||||
"clsx": "2.1.1",
|
||||
"copy-to-clipboard": "^3.3.3",
|
||||
@@ -411,12 +407,12 @@
|
||||
"mdast-util-mdxjs-esm": "^2.0.1",
|
||||
"medium-zoom": "1.1.0",
|
||||
"nprogress": "^0.2.0",
|
||||
"react": "^19.2.4",
|
||||
"react-dom": "^19.2.4",
|
||||
"react": "^19.2.6",
|
||||
"react-dom": "^19.2.6",
|
||||
"react-lazy-with-preload": "^2.2.1",
|
||||
"react-reconciler": "0.33.0",
|
||||
"react-render-to-markdown": "19.0.1",
|
||||
"react-router-dom": "^7.13.2",
|
||||
"react-router-dom": "^7.15.0",
|
||||
"rehype-external-links": "^3.0.0",
|
||||
"rehype-raw": "^7.0.0",
|
||||
"remark-cjk-friendly": "^2.0.1",
|
||||
@@ -440,39 +436,39 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@rspress/plugin-client-redirects": {
|
||||
"version": "2.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.9.tgz",
|
||||
"integrity": "sha512-r2GyHzOSt8CeS4UIsy/cPM5Zotekt1JVQFmgOYGapvll5ktUlVcd77HLtXDbZjtpgtj0XlaMLrXueOpV2gsBoQ==",
|
||||
"version": "2.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.11.tgz",
|
||||
"integrity": "sha512-DI9vod5mGccg57c19CuFpN3mGP1FEEueOUnEUz1UHXSyXg9YTj+ox7Xla4jUUzAzoPVGiWSSsfbtCTwdoxAsbg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "^20.19.0 || >=22.12.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@rspress/core": "^2.0.9"
|
||||
"@rspress/core": "^2.0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/@rspress/plugin-sitemap": {
|
||||
"version": "2.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.9.tgz",
|
||||
"integrity": "sha512-GTuXuySaeaazUZoUxdk2vZ8p0ehIgulPjCP9C7gDg6lIh5JGpUbcjG4def4tWHsxUoKp2rIwu/93bHwKb8T0Mw==",
|
||||
"version": "2.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.11.tgz",
|
||||
"integrity": "sha512-046LCHgbJXdaPipWB2SWMjZcAtIrOjXGZOD92xlTjhZ74D7Mk1Nod1MQdtOEoISWedcHdgpUVXMDbB1doKBpPQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "^20.19.0 || >=22.12.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@rspress/core": "^2.0.9"
|
||||
"@rspress/core": "^2.0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/@rspress/shared": {
|
||||
"version": "2.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.9.tgz",
|
||||
"integrity": "sha512-G48n3pC7AVAR58pLqwClUCYj5Nt7ZgYEStR8VTBGFuPgXtzb3+KPfo/gz0hb6wxdKJ1cL5ohPsZ6EXqllu6lew==",
|
||||
"version": "2.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.11.tgz",
|
||||
"integrity": "sha512-7l5Pso4s597utJyisVEnd7n/40h053nfE8DwGQMeS8RLGtSwVgxFwNHsSrvQEGtFlLrg2aWWSITqnAVO1wfTew==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@rsbuild/core": "2.0.0-rc.1",
|
||||
"@rsbuild/core": "^2.0.5",
|
||||
"@shikijs/rehype": "^4.0.2",
|
||||
"unified": "^11.0.5"
|
||||
}
|
||||
@@ -614,9 +610,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tybys/wasm-util": {
|
||||
"version": "0.10.1",
|
||||
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz",
|
||||
"integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
|
||||
"version": "0.10.2",
|
||||
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.2.tgz",
|
||||
"integrity": "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
@@ -711,13 +707,13 @@
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/@unhead/react": {
|
||||
"version": "2.1.13",
|
||||
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.13.tgz",
|
||||
"integrity": "sha512-gC48tNJ0UtbithkiKCc2WUlxbVVk5o171EtruS2w2hQUblfYFHzCPu2hljjT1e0tUHXXqN8EMv7mpxHddMB2sg==",
|
||||
"version": "2.1.15",
|
||||
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.15.tgz",
|
||||
"integrity": "sha512-5hfAaZ3XJq9JkspRzZdSPsMrXXA8v/SKiEOxZcN9L40o44byF/50bcQuOLgSSCAx8802mI5VG32KZXWTtsLu9Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"unhead": "2.1.13"
|
||||
"unhead": "2.1.15"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/harlan-zw"
|
||||
@@ -972,16 +968,6 @@
|
||||
"url": "https://github.com/fb55/entities?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/error-stack-parser": {
|
||||
"version": "2.1.4",
|
||||
"resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz",
|
||||
"integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"stackframe": "^1.3.4"
|
||||
}
|
||||
},
|
||||
"node_modules/esast-util-from-estree": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz",
|
||||
@@ -1413,9 +1399,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/hookable": {
|
||||
"version": "6.1.0",
|
||||
"resolved": "https://registry.npmjs.org/hookable/-/hookable-6.1.0.tgz",
|
||||
"integrity": "sha512-ZoKZSJgu8voGK2geJS+6YtYjvIzu9AOM/KZXsBxr83uhLL++e9pEv/dlgwgy3dvHg06kTz6JOh1hk3C8Ceiymw==",
|
||||
"version": "6.1.1",
|
||||
"resolved": "https://registry.npmjs.org/hookable/-/hookable-6.1.1.tgz",
|
||||
"integrity": "sha512-U9LYDy1CwhMCnprUfeAZWZGByVbhd54hwepegYTK7Pi5NvqEj63ifz5z+xukznehT7i6NIZRu89Ay1AZmRsLEQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
@@ -2697,20 +2683,20 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/oniguruma-parser": {
|
||||
"version": "0.12.1",
|
||||
"resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz",
|
||||
"integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==",
|
||||
"version": "0.12.2",
|
||||
"resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.2.tgz",
|
||||
"integrity": "sha512-6HVa5oIrgMC6aA6WF6XyyqbhRPJrKR02L20+2+zpDtO5QAzGHAUGw5TKQvwi5vctNnRHkJYmjAhRVQF2EKdTQw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/oniguruma-to-es": {
|
||||
"version": "4.3.5",
|
||||
"resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.5.tgz",
|
||||
"integrity": "sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ==",
|
||||
"version": "4.3.6",
|
||||
"resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.6.tgz",
|
||||
"integrity": "sha512-csuQ9x3Yr0cEIs/Zgx/OEt9iBw9vqIunAPQkx19R/fiMq2oGVTgcMqO/V3Ybqefr1TBvosI6jU539ksaBULJyA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"oniguruma-parser": "^0.12.1",
|
||||
"oniguruma-parser": "^0.12.2",
|
||||
"regex": "^6.1.0",
|
||||
"regex-recursion": "^6.0.2"
|
||||
}
|
||||
@@ -2767,9 +2753,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/react": {
|
||||
"version": "19.2.5",
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz",
|
||||
"integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==",
|
||||
"version": "19.2.6",
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-19.2.6.tgz",
|
||||
"integrity": "sha512-sfWGGfavi0xr8Pg0sVsyHMAOziVYKgPLNrS7ig+ivMNb3wbCBw3KxtflsGBAwD3gYQlE/AEZsTLgToRrSCjb0Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -2777,16 +2763,16 @@
|
||||
}
|
||||
},
|
||||
"node_modules/react-dom": {
|
||||
"version": "19.2.5",
|
||||
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.5.tgz",
|
||||
"integrity": "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag==",
|
||||
"version": "19.2.6",
|
||||
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.6.tgz",
|
||||
"integrity": "sha512-0prMI+hvBbPjsWnxDLxlCGyM8PN6UuWjEUCYmZhO67xIV9Xasa/r/vDnq+Xyq4Lo27g8QSbO5YzARu0D1Sps3g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"scheduler": "^0.27.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": "^19.2.5"
|
||||
"react": "^19.2.6"
|
||||
}
|
||||
},
|
||||
"node_modules/react-lazy-with-preload": {
|
||||
@@ -2836,9 +2822,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/react-router": {
|
||||
"version": "7.14.0",
|
||||
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.14.0.tgz",
|
||||
"integrity": "sha512-m/xR9N4LQLmAS0ZhkY2nkPA1N7gQ5TUVa5n8TgANuDTARbn1gt+zLPXEm7W0XDTbrQ2AJSJKhoa6yx1D8BcpxQ==",
|
||||
"version": "7.15.0",
|
||||
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.15.0.tgz",
|
||||
"integrity": "sha512-HW9vYwuM8f4yx66Izy8xfrzCM+SBJluoZcCbww9A1TySax11S5Vgw6fi3ZjMONw9J4gQwngL7PzkyIpJJpJ7RQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -2859,13 +2845,13 @@
|
||||
}
|
||||
},
|
||||
"node_modules/react-router-dom": {
|
||||
"version": "7.14.0",
|
||||
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.14.0.tgz",
|
||||
"integrity": "sha512-2G3ajSVSZMEtmTjIklRWlNvo8wICEpLihfD/0YMDxbWK2UyP5EGfnoIn9AIQGnF3G/FX0MRbHXdFcD+rL1ZreQ==",
|
||||
"version": "7.15.0",
|
||||
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.15.0.tgz",
|
||||
"integrity": "sha512-VcrVg64Fo8nwBvDscajG8gRTLIuTC6N50nb22l2HOOV4PTOHgoGp8mUjy9wLiHYoYTSYI36tUnXZgasSRFZorQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"react-router": "7.14.0"
|
||||
"react-router": "7.15.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
@@ -3218,13 +3204,6 @@
|
||||
"url": "https://github.com/sponsors/wooorm"
|
||||
}
|
||||
},
|
||||
"node_modules/stackframe": {
|
||||
"version": "1.3.4",
|
||||
"resolved": "https://registry.npmjs.org/stackframe/-/stackframe-1.3.4.tgz",
|
||||
"integrity": "sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/stringify-entities": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
|
||||
@@ -3311,9 +3290,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/unhead": {
|
||||
"version": "2.1.13",
|
||||
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.13.tgz",
|
||||
"integrity": "sha512-jO9M1sI6b2h/1KpIu4Jeu+ptumLmUKboRRLxys5pYHFeT+lqTzfNHbYUX9bxVDhC1FBszAGuWcUVlmvIPsah8Q==",
|
||||
"version": "2.1.15",
|
||||
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.15.tgz",
|
||||
"integrity": "sha512-MCt5T90mCWyr3Z6pUCdM9lVRXoMoVBlL7z7U4CYVIiaDiuzad/UCfLuMqz5MeNmpZUgoBCQnrucJimU7EZR+XA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
||||
@@ -81,9 +81,11 @@ conduwuit-macros.workspace = true
|
||||
conduwuit-service.workspace = true
|
||||
const-str.workspace = true
|
||||
ctor.workspace = true
|
||||
dtor.workspace = true
|
||||
futures.workspace = true
|
||||
lettre.workspace = true
|
||||
log.workspace = true
|
||||
assign.workspace = true
|
||||
ruma.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde-saphyr.workspace = true
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
#[implement(Context, params = "<'_>")]
|
||||
pub(super) async fn check_all_users(&self) -> Result {
|
||||
let timer = tokio::time::Instant::now();
|
||||
let users = self.services.users.iter().collect::<Vec<_>>().await;
|
||||
let users = self.services.users.stream().collect::<Vec<_>>().await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
let total = users.len();
|
||||
|
||||
+37
-24
@@ -79,12 +79,14 @@ pub(super) async fn parse_pdu(&self) -> Result {
|
||||
}
|
||||
|
||||
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
|
||||
let room_version_rules = RoomVersionId::V12.rules().unwrap();
|
||||
|
||||
match serde_json::from_str(&string) {
|
||||
| Err(e) => return Err!("Invalid json in command body: {e}"),
|
||||
| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
|
||||
| Ok(value) => match ruma::signatures::reference_hash(&value, &room_version_rules) {
|
||||
| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
|
||||
| Ok(hash) => {
|
||||
let event_id = OwnedEventId::parse(format!("${hash}"));
|
||||
let event_id = EventId::parse(format!("${hash}"));
|
||||
match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
|
||||
| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
|
||||
| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
|
||||
@@ -119,7 +121,7 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
|
||||
} else {
|
||||
"PDU found in our database"
|
||||
};
|
||||
write!(self, "{msg}\n```json\n{text}\n```",)
|
||||
write!(self, "{msg}\n```json\n{text}\n```")
|
||||
},
|
||||
}
|
||||
.await
|
||||
@@ -187,10 +189,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
|
||||
|
||||
for event_id in list {
|
||||
if force {
|
||||
match self
|
||||
.get_remote_pdu(event_id.to_owned(), server.clone())
|
||||
.await
|
||||
{
|
||||
match self.get_remote_pdu(event_id.clone(), server.clone()).await {
|
||||
| Err(e) => {
|
||||
failed_count = failed_count.saturating_add(1);
|
||||
self.services
|
||||
@@ -205,7 +204,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
|
||||
},
|
||||
}
|
||||
} else {
|
||||
self.get_remote_pdu(event_id.to_owned(), server.clone())
|
||||
self.get_remote_pdu(event_id.clone(), server.clone())
|
||||
.await?;
|
||||
success_count = success_count.saturating_add(1);
|
||||
}
|
||||
@@ -237,10 +236,10 @@ pub(super) async fn get_remote_pdu(
|
||||
match self
|
||||
.services
|
||||
.sending
|
||||
.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
|
||||
event_id: event_id.clone(),
|
||||
include_unredacted_content: None,
|
||||
})
|
||||
.send_federation_request(
|
||||
&server,
|
||||
ruma::api::federation::event::get_event::v1::Request::new(event_id.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
| Err(e) => {
|
||||
@@ -330,9 +329,9 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
|
||||
match self
|
||||
.services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
.send_unauthenticated_request(
|
||||
&server,
|
||||
ruma::api::federation::discovery::get_server_version::v1::Request {},
|
||||
ruma::api::federation::discovery::get_server_version::v1::Request::new(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -361,7 +360,7 @@ pub(super) async fn force_device_list_updates(&self) -> Result {
|
||||
self.services
|
||||
.users
|
||||
.stream()
|
||||
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
|
||||
.for_each(async |user_id| self.services.users.mark_device_key_update(&user_id).await)
|
||||
.await;
|
||||
|
||||
write!(self, "Marked all devices for all users as having new keys to update").await
|
||||
@@ -430,9 +429,16 @@ pub(super) async fn verify_json(&self) -> Result {
|
||||
}
|
||||
|
||||
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
||||
let room_version_rules = RoomVersionId::V12.rules().unwrap();
|
||||
|
||||
match serde_json::from_str::<CanonicalJsonObject>(&string) {
|
||||
| Err(e) => return Err!("Invalid json: {e}"),
|
||||
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
|
||||
| Ok(value) => match self
|
||||
.services
|
||||
.server_keys
|
||||
.verify_json(&value, &room_version_rules)
|
||||
.await
|
||||
{
|
||||
| Err(e) => return Err!("Signature verification failed: {e}"),
|
||||
| Ok(()) => write!(self, "Signature correct"),
|
||||
},
|
||||
@@ -445,9 +451,15 @@ pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
|
||||
use ruma::signatures::Verified;
|
||||
|
||||
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
|
||||
let room_version_rules = RoomVersionId::V12.rules().unwrap();
|
||||
|
||||
event.remove("event_id");
|
||||
let msg = match self.services.server_keys.verify_event(&event, None).await {
|
||||
let msg = match self
|
||||
.services
|
||||
.server_keys
|
||||
.verify_event(&event, &room_version_rules)
|
||||
.await
|
||||
{
|
||||
| Err(e) => return Err(e),
|
||||
| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
|
||||
| Ok(Verified::All) => "signatures and hashes OK.",
|
||||
@@ -544,16 +556,17 @@ pub(super) async fn force_set_room_state_from_server(
|
||||
};
|
||||
|
||||
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
|
||||
let room_version_rules = room_version.rules().unwrap();
|
||||
|
||||
let mut state: HashMap<u64, OwnedEventId> = HashMap::new();
|
||||
|
||||
let remote_state_response = self
|
||||
.services
|
||||
.sending
|
||||
.send_federation_request(&server_name, get_room_state::v1::Request {
|
||||
room_id: room_id.clone(),
|
||||
event_id: at_event_id,
|
||||
})
|
||||
.send_federation_request(
|
||||
&server_name,
|
||||
get_room_state::v1::Request::new(at_event_id, room_id.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
for pdu in remote_state_response.pdus.clone() {
|
||||
@@ -576,7 +589,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||
for result in remote_state_response.pdus.iter().map(|pdu| {
|
||||
self.services
|
||||
.server_keys
|
||||
.validate_and_add_event_id(pdu, &room_version)
|
||||
.validate_and_add_event_id(pdu, &room_version_rules)
|
||||
}) {
|
||||
let Ok((event_id, value)) = result.await else {
|
||||
continue;
|
||||
@@ -608,7 +621,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||
for result in remote_state_response.auth_chain.iter().map(|pdu| {
|
||||
self.services
|
||||
.server_keys
|
||||
.validate_and_add_event_id(pdu, &room_version)
|
||||
.validate_and_add_event_id(pdu, &room_version_rules)
|
||||
}) {
|
||||
let Ok((event_id, value)) = result.await else {
|
||||
continue;
|
||||
@@ -625,7 +638,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||
.services
|
||||
.rooms
|
||||
.event_handler
|
||||
.resolve_state(&room_id, &room_version, state)
|
||||
.resolve_state(&room_id, &room_version_rules, state)
|
||||
.await?;
|
||||
|
||||
info!("Compressing new room state");
|
||||
|
||||
@@ -102,16 +102,12 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
|
||||
);
|
||||
}
|
||||
|
||||
if !self.services.users.exists(&user_id).await {
|
||||
return Err!("Remote user does not exist in our database.",);
|
||||
}
|
||||
|
||||
let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(&user_id)
|
||||
.then(|room_id| get_room_info(self.services, room_id))
|
||||
.then(async |room_id| get_room_info(self.services, &room_id).await)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
@@ -129,6 +125,6 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
|
||||
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -6,7 +6,8 @@
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::media::Dim;
|
||||
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
|
||||
use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};
|
||||
use service::media::mxc::Mxc;
|
||||
|
||||
use crate::{admin_command, utils::parse_local_user_id};
|
||||
|
||||
@@ -261,7 +262,7 @@ pub(super) async fn delete_past_remote_media(
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
||||
self.write_str(&format!("Deleted {deleted_count} total files."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -271,7 +272,7 @@ pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
|
||||
|
||||
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
|
||||
|
||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
||||
self.write_str(&format!("Deleted {deleted_count} total files."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -330,7 +331,7 @@ pub(super) async fn delete_all_from_server(
|
||||
}
|
||||
}
|
||||
|
||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
||||
self.write_str(&format!("Deleted {deleted_count} total files."))
|
||||
.await
|
||||
}
|
||||
|
||||
|
||||
@@ -16,8 +16,8 @@
|
||||
use ruma::{
|
||||
EventId,
|
||||
events::{
|
||||
relation::InReplyTo,
|
||||
room::message::{Relation::Reply, RoomMessageEventContent},
|
||||
relation::{InReplyTo, Reply},
|
||||
room::message::{Relation, RoomMessageEventContent},
|
||||
},
|
||||
};
|
||||
use service::{
|
||||
@@ -38,6 +38,7 @@ pub(super) fn dispatch(services: Arc<Services>, command: CommandInput) -> Proces
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all, name = "admin", level = "info")]
|
||||
#[allow(clippy::result_large_err)]
|
||||
async fn handle_command(services: Arc<Services>, command: CommandInput) -> ProcessorResult {
|
||||
AssertUnwindSafe(Box::pin(process_command(services, &command)))
|
||||
.catch_unwind()
|
||||
@@ -277,9 +278,8 @@ fn reply(
|
||||
mut content: RoomMessageEventContent,
|
||||
reply_id: Option<&EventId>,
|
||||
) -> RoomMessageEventContent {
|
||||
content.relates_to = reply_id.map(|event_id| Reply {
|
||||
in_reply_to: InReplyTo { event_id: event_id.to_owned() },
|
||||
});
|
||||
content.relates_to =
|
||||
reply_id.map(|event_id| Relation::Reply(Reply::new(InReplyTo::new(event_id.to_owned()))));
|
||||
|
||||
content
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use clap::Subcommand;
|
||||
use conduwuit::Result;
|
||||
use conduwuit_database::Deserialized as _;
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomId, OwnedUserId};
|
||||
use ruma::{OwnedRoomId, OwnedUserId, exports::serde::Serialize};
|
||||
|
||||
use crate::{admin_command, admin_command_dispatch};
|
||||
|
||||
@@ -58,13 +59,22 @@ async fn account_data_get(
|
||||
room_id: Option<OwnedRoomId>,
|
||||
) -> Result {
|
||||
let timer = tokio::time::Instant::now();
|
||||
let results = self
|
||||
let result = self
|
||||
.services
|
||||
.account_data
|
||||
.get_raw(room_id.as_deref(), &user_id, &kind)
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
|
||||
let json = serde_json::to_string_pretty(&match room_id {
|
||||
| None => result
|
||||
.deserialized::<ruma::serde::Raw<ruma::events::AnyGlobalAccountDataEvent>>()?
|
||||
.serialize(serde_json::value::Serializer)?,
|
||||
| Some(_) => result
|
||||
.deserialized::<ruma::serde::Raw<ruma::events::AnyRoomAccountDataEvent>>()?
|
||||
.serialize(serde_json::value::Serializer)?,
|
||||
})?;
|
||||
|
||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{json}\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Resu
|
||||
|
||||
while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await {
|
||||
if let Some(server_name) = server_name.as_ref() {
|
||||
if name != server_name {
|
||||
if name != *server_name {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@@ -76,7 +76,7 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result {
|
||||
overrides.next().await
|
||||
{
|
||||
if let Some(server_name) = server_name.as_ref() {
|
||||
if name != server_name {
|
||||
if name != *server_name {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,7 +41,6 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
|
||||
.rooms
|
||||
.alias
|
||||
.local_aliases_for_room(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -54,7 +53,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
|
||||
.rooms
|
||||
.alias
|
||||
.all_local_aliases()
|
||||
.map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned()))
|
||||
.map(|(room_id, alias)| (room_id, alias.to_owned()))
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
@@ -101,7 +101,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_servers(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -118,7 +117,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.server_rooms(&server)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -135,7 +133,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -152,7 +149,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.local_users_in_room(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -169,7 +165,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.active_local_users_in_room(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -212,7 +207,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_useroncejoined(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -229,7 +223,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members_invited(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -276,7 +269,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(&user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
@@ -15,10 +15,6 @@ pub enum UsersCommand {
|
||||
|
||||
IterUsers2,
|
||||
|
||||
PasswordHash {
|
||||
user_id: OwnedUserId,
|
||||
},
|
||||
|
||||
ListDevices {
|
||||
user_id: OwnedUserId,
|
||||
},
|
||||
@@ -104,7 +100,6 @@ async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Re
|
||||
.rooms
|
||||
.state_cache
|
||||
.get_shared_rooms(&user_a, &user_b)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
@@ -217,8 +212,7 @@ async fn iter_users2(&self) -> Result {
|
||||
let result: Vec<_> = self.services.users.stream().collect().await;
|
||||
let result: Vec<_> = result
|
||||
.into_iter()
|
||||
.map(ruma::UserId::as_bytes)
|
||||
.map(String::from_utf8_lossy)
|
||||
.map(|user_id| String::from_utf8_lossy(user_id.as_bytes()).into_owned())
|
||||
.collect();
|
||||
|
||||
let query_time = timer.elapsed();
|
||||
@@ -237,16 +231,6 @@ async fn count_users(&self) -> Result {
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn password_hash(&self, user_id: OwnedUserId) -> Result {
|
||||
let timer = tokio::time::Instant::now();
|
||||
let result = self.services.users.password_hash(&user_id).await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn list_devices(&self, user_id: OwnedUserId) -> Result {
|
||||
let timer = tokio::time::Instant::now();
|
||||
@@ -254,7 +238,6 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result {
|
||||
.services
|
||||
.users
|
||||
.all_device_ids(&user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
use clap::Subcommand;
|
||||
use conduwuit::{Err, Result};
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomAliasId, OwnedRoomId};
|
||||
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId};
|
||||
|
||||
use crate::Context;
|
||||
|
||||
@@ -52,7 +52,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||
| RoomAliasCommand::Which { ref room_alias_localpart } => {
|
||||
let room_alias_str =
|
||||
format!("#{}:{}", room_alias_localpart, services.globals.server_name());
|
||||
let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
|
||||
let room_alias = match RoomAliasId::parse(room_alias_str) {
|
||||
| Ok(alias) => alias,
|
||||
| Err(err) => {
|
||||
return Err!("Failed to parse alias: {err}");
|
||||
@@ -139,7 +139,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||
.rooms
|
||||
.alias
|
||||
.all_local_aliases()
|
||||
.map(|(room_id, localpart)| (room_id.into(), localpart.into()))
|
||||
.map(|(room_id, localpart)| (room_id, localpart.into()))
|
||||
.collect::<Vec<(OwnedRoomId, String)>>()
|
||||
.await;
|
||||
|
||||
|
||||
@@ -22,14 +22,14 @@ pub(super) async fn list_rooms(
|
||||
.metadata
|
||||
.iter_ids()
|
||||
.filter_map(|room_id| async move {
|
||||
(!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await)
|
||||
(!exclude_disabled || !self.services.rooms.metadata.is_disabled(&room_id).await)
|
||||
.then_some(room_id)
|
||||
})
|
||||
.filter_map(|room_id| async move {
|
||||
(!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await)
|
||||
(!exclude_banned || !self.services.rooms.metadata.is_banned(&room_id).await)
|
||||
.then_some(room_id)
|
||||
})
|
||||
.then(|room_id| get_room_info(self.services, room_id))
|
||||
.then(async |room_id| get_room_info(self.services, &room_id).await)
|
||||
.then(|(room_id, total_members, name)| async move {
|
||||
let local_members: Vec<_> = self
|
||||
.services
|
||||
@@ -72,7 +72,7 @@ pub(super) async fn list_rooms(
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
|
||||
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len()))
|
||||
.await
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
|
||||
.rooms
|
||||
.directory
|
||||
.public_rooms()
|
||||
.then(|room_id| get_room_info(services, room_id))
|
||||
.then(async |room_id| get_room_info(services, &room_id).await)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
@@ -67,7 +67,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
|
||||
.join("\n");
|
||||
|
||||
context
|
||||
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
|
||||
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```"))
|
||||
.await
|
||||
},
|
||||
}
|
||||
|
||||
@@ -46,7 +46,6 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
|
||||
.then(|| self.services.globals.user_is_local(user_id))
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.map(ToOwned::to_owned)
|
||||
.filter_map(|user_id| async move {
|
||||
Some((
|
||||
self.services
|
||||
@@ -67,7 +66,7 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
|
||||
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
|
||||
@@ -71,7 +71,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
|
||||
debug!("Room specified is a room ID, banning room ID");
|
||||
|
||||
room_id.to_owned()
|
||||
room_id.clone()
|
||||
} else if room.is_room_alias_id() {
|
||||
let room_alias = match RoomAliasId::parse(&room) {
|
||||
| Ok(room_alias) => room_alias,
|
||||
@@ -89,7 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
locally, if not using get_alias_helper to fetch room ID remotely"
|
||||
);
|
||||
|
||||
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||
match self.services.rooms.alias.resolve_alias(&room_alias).await {
|
||||
| Ok((room_id, servers)) => {
|
||||
debug!(
|
||||
%room_id,
|
||||
@@ -116,7 +116,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.ready_filter(|user| self.services.globals.user_is_local(user))
|
||||
.boxed();
|
||||
|
||||
@@ -140,7 +139,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
.rooms
|
||||
.alias
|
||||
.local_aliases_for_room(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.for_each(|local_alias| async move {
|
||||
self.services
|
||||
.rooms
|
||||
@@ -205,7 +203,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||
},
|
||||
};
|
||||
|
||||
room_ids.push(room_id.to_owned());
|
||||
room_ids.push(room_id.clone());
|
||||
}
|
||||
|
||||
if room_alias_or_id.is_room_alias_id() {
|
||||
@@ -215,7 +213,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(room_alias)
|
||||
.resolve_local_alias(&room_alias)
|
||||
.await
|
||||
{
|
||||
| Ok(room_id) => room_id,
|
||||
@@ -229,7 +227,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(room_alias)
|
||||
.resolve_alias(&room_alias)
|
||||
.await
|
||||
{
|
||||
| Ok((room_id, servers)) => {
|
||||
@@ -284,7 +282,6 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.ready_filter(|user| self.services.globals.user_is_local(user))
|
||||
.boxed();
|
||||
|
||||
@@ -309,7 +306,6 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||
.rooms
|
||||
.alias
|
||||
.local_aliases_for_room(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.for_each(|local_alias| async move {
|
||||
self.services
|
||||
.rooms
|
||||
@@ -348,9 +344,9 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
};
|
||||
|
||||
debug!("Room specified is a room ID, unbanning room ID");
|
||||
self.services.rooms.metadata.ban_room(room_id, false);
|
||||
self.services.rooms.metadata.ban_room(&room_id, false);
|
||||
|
||||
room_id.to_owned()
|
||||
room_id.clone()
|
||||
} else if room.is_room_alias_id() {
|
||||
let room_alias = match RoomAliasId::parse(&room) {
|
||||
| Ok(room_alias) => room_alias,
|
||||
@@ -372,7 +368,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(room_alias)
|
||||
.resolve_local_alias(&room_alias)
|
||||
.await
|
||||
{
|
||||
| Ok(room_id) => room_id,
|
||||
@@ -382,7 +378,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||
room ID over federation"
|
||||
);
|
||||
|
||||
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||
match self.services.rooms.alias.resolve_alias(&room_alias).await {
|
||||
| Ok((room_id, servers)) => {
|
||||
debug!(
|
||||
%room_id,
|
||||
@@ -453,6 +449,6 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result {
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
|
||||
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -159,8 +159,8 @@ pub(super) async fn list_features(&self) -> Result {
|
||||
let mut enabled_features = conduwuit::info::introspection::ENABLED_FEATURES
|
||||
.lock()
|
||||
.expect("locked")
|
||||
.iter()
|
||||
.flat_map(|(_, f)| f.iter())
|
||||
.values()
|
||||
.flat_map(|f| f.iter())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
enabled_features.sort_unstable();
|
||||
|
||||
+171
-107
@@ -4,28 +4,26 @@
|
||||
};
|
||||
|
||||
use api::client::{
|
||||
full_user_deactivate, join_room_by_id_helper, leave_room, recreate_push_rules_and_return,
|
||||
remote_leave_room,
|
||||
full_user_deactivate, leave_room, recreate_push_rules_and_return, remote_leave_room,
|
||||
};
|
||||
use conduwuit::{
|
||||
Err, Result, debug_warn, error, info,
|
||||
matrix::{Event, pdu::PduBuilder},
|
||||
matrix::{Event, pdu::PartialPdu},
|
||||
utils::{self, ReadyExt},
|
||||
warn,
|
||||
};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use lettre::Address;
|
||||
use ruma::{
|
||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, UserId,
|
||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, ServerName,
|
||||
UserId, assign,
|
||||
events::{
|
||||
RoomAccountDataEventType, StateEventType,
|
||||
room::{
|
||||
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||
redaction::RoomRedactionEventContent,
|
||||
},
|
||||
RoomAccountDataEventType,
|
||||
room::{power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent},
|
||||
tag::{TagEvent, TagEventContent, TagInfo},
|
||||
},
|
||||
};
|
||||
use service::users::HashedPassword;
|
||||
|
||||
use crate::{
|
||||
admin_command, get_room_info,
|
||||
@@ -41,7 +39,7 @@ pub(super) async fn list_users(&self) -> Result {
|
||||
.services
|
||||
.users
|
||||
.list_local_users()
|
||||
.map(ToString::to_string)
|
||||
.map(|id| id.as_str().to_owned())
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
@@ -72,7 +70,7 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||
// Create user
|
||||
self.services
|
||||
.users
|
||||
.create(&user_id, Some(password.as_str()), None)
|
||||
.create(&user_id, Some(HashedPassword::new(&password)?))
|
||||
.await?;
|
||||
|
||||
// Default to pretty displayname
|
||||
@@ -103,11 +101,12 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||
ruma::events::GlobalAccountDataEventType::PushRules
|
||||
.to_string()
|
||||
.into(),
|
||||
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
||||
content: ruma::events::push_rules::PushRulesEventContent {
|
||||
global: ruma::push::Ruleset::server_default(&user_id),
|
||||
},
|
||||
})?,
|
||||
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent::new(
|
||||
ruma::events::push_rules::PushRulesEventContent::new(
|
||||
ruma::push::Ruleset::server_default(&user_id),
|
||||
),
|
||||
))
|
||||
.unwrap(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -135,18 +134,20 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||
}
|
||||
|
||||
if let Some(room_server_name) = room.server_name() {
|
||||
match join_room_by_id_helper(
|
||||
self.services,
|
||||
&user_id,
|
||||
&room_id,
|
||||
Some("Automatically joining this room upon registration".to_owned()),
|
||||
&[
|
||||
self.services.globals.server_name().to_owned(),
|
||||
room_server_name.to_owned(),
|
||||
],
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
match self
|
||||
.services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(
|
||||
&user_id,
|
||||
&room_id,
|
||||
Some("Automatically joining this room upon registration".to_owned()),
|
||||
&[
|
||||
self.services.globals.server_name().to_owned(),
|
||||
room_server_name.to_owned(),
|
||||
],
|
||||
)
|
||||
.await
|
||||
{
|
||||
| Ok(_response) => {
|
||||
info!("Automatically joined room {room} for user {user_id}");
|
||||
@@ -275,24 +276,25 @@ pub(super) async fn reset_password(
|
||||
|
||||
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
|
||||
|
||||
match self
|
||||
.services
|
||||
self.services
|
||||
.users
|
||||
.set_password(&user_id, Some(new_password.as_str()))
|
||||
.await
|
||||
{
|
||||
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
|
||||
| Ok(()) => {
|
||||
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`")
|
||||
},
|
||||
}
|
||||
.set_password(&user_id, Some(HashedPassword::new(&new_password)?));
|
||||
|
||||
self.write_str(&format!(
|
||||
"Successfully reset the password for user {user_id}: `{new_password}`"
|
||||
))
|
||||
.await?;
|
||||
|
||||
if logout {
|
||||
self.services
|
||||
.users
|
||||
.all_device_ids(&user_id)
|
||||
.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
|
||||
.for_each(async |device_id| {
|
||||
self.services
|
||||
.users
|
||||
.remove_device(&user_id, &device_id)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
write!(self, "\nAll existing sessions have been logged out.").await?;
|
||||
}
|
||||
@@ -427,6 +429,82 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn list_invited_rooms(&self, user_id: String) -> Result {
|
||||
// Validate user id
|
||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||
|
||||
let mut rooms: Vec<((OwnedRoomId, u64, String), Result<OwnedUserId>)> = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_invited(&user_id)
|
||||
.then(async |(room_id, _)| {
|
||||
let sender = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.invite_sender(&user_id, &room_id)
|
||||
.await;
|
||||
(get_room_info(self.services, &room_id).await, sender)
|
||||
})
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
if rooms.is_empty() {
|
||||
return Err!("User is not invited to any rooms.");
|
||||
}
|
||||
|
||||
rooms.sort_by_key(|r| r.0.1);
|
||||
rooms.reverse();
|
||||
|
||||
let body = rooms
|
||||
.iter()
|
||||
.map(|((id, members, name), sender)| match sender {
|
||||
| Ok(user_id) =>
|
||||
format!("{id}\tInviter: {user_id}\tMembers: {members}\tName: {name}"),
|
||||
| Err(_) => format!("{id}\tMembers: {members}\tName: {name}"),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("Rooms {user_id} is Invited to ({}):\n```\n{body}\n```", rooms.len()))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn reject_all_invites(&self, user_id: String) -> Result {
|
||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||
|
||||
assert!(
|
||||
self.services.globals.user_is_local(&user_id),
|
||||
"Parsed user_id must be a local user"
|
||||
);
|
||||
|
||||
let fails = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_invited(&user_id)
|
||||
.filter_map(async |(room_id, _)| {
|
||||
match leave_room(self.services, &user_id, &room_id, None).await {
|
||||
| Err(ref e) => {
|
||||
warn!(%user_id, "Failed to leave {room_id}: {e}");
|
||||
Some(())
|
||||
},
|
||||
| Ok(()) => None,
|
||||
}
|
||||
})
|
||||
.count()
|
||||
.await;
|
||||
|
||||
if fails > 0 {
|
||||
return Err!("{fails} invites could not be rejected");
|
||||
}
|
||||
|
||||
self.write_str("Successfully rejected all invites.").await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
|
||||
// Validate user id
|
||||
@@ -437,7 +515,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(&user_id)
|
||||
.then(|room_id| get_room_info(self.services, room_id))
|
||||
.then(async |room_id| get_room_info(self.services, &room_id).await)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
@@ -454,7 +532,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
|
||||
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len()))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -506,7 +584,7 @@ pub(super) async fn force_join_list_of_local_users(
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
||||
.ready_any(|user_id| server_admins.contains(&user_id))
|
||||
.await
|
||||
{
|
||||
return Err!("There is not a single server admin in the room.",);
|
||||
@@ -552,15 +630,12 @@ pub(super) async fn force_join_list_of_local_users(
|
||||
let mut successful_joins: usize = 0;
|
||||
|
||||
for user_id in user_ids {
|
||||
match join_room_by_id_helper(
|
||||
self.services,
|
||||
&user_id,
|
||||
&room_id,
|
||||
Some(String::from(BULK_JOIN_REASON)),
|
||||
&servers,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
match self
|
||||
.services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(&user_id, &room_id, Some(String::from(BULK_JOIN_REASON)), &servers)
|
||||
.await
|
||||
{
|
||||
| Ok(_res) => {
|
||||
successful_joins = successful_joins.saturating_add(1);
|
||||
@@ -620,7 +695,7 @@ pub(super) async fn force_join_all_local_users(
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
||||
.ready_any(|user_id| server_admins.contains(&user_id))
|
||||
.await
|
||||
{
|
||||
return Err!("There is not a single server admin in the room.",);
|
||||
@@ -633,19 +708,15 @@ pub(super) async fn force_join_all_local_users(
|
||||
.services
|
||||
.users
|
||||
.list_local_users()
|
||||
.map(UserId::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
{
|
||||
match join_room_by_id_helper(
|
||||
self.services,
|
||||
user_id,
|
||||
&room_id,
|
||||
Some(String::from(BULK_JOIN_REASON)),
|
||||
&servers,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
match self
|
||||
.services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(user_id, &room_id, Some(String::from(BULK_JOIN_REASON)), &servers)
|
||||
.await
|
||||
{
|
||||
| Ok(_res) => {
|
||||
successful_joins = successful_joins.saturating_add(1);
|
||||
@@ -682,9 +753,13 @@ pub(super) async fn force_join_room(
|
||||
self.services.globals.user_is_local(&user_id),
|
||||
"Parsed user_id must be a local user"
|
||||
);
|
||||
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, &None).await?;
|
||||
self.services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(&user_id, &room_id, None, &servers)
|
||||
.await?;
|
||||
|
||||
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
|
||||
self.write_str(&format!("{user_id} has been joined to {room_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -716,7 +791,7 @@ pub(super) async fn force_leave_room(
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
self.write_str(&format!("{user_id} has left {room_id}.",))
|
||||
self.write_str(&format!("{user_id} has left {room_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -730,42 +805,34 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
|
||||
"Parsed user_id must be a local user"
|
||||
);
|
||||
|
||||
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
|
||||
let state_lock = self.services.rooms.state.mutex.lock(room_id.as_str()).await;
|
||||
|
||||
let room_power_levels: Option<RoomPowerLevelsEventContent> = self
|
||||
let mut room_power_levels = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
|
||||
.await
|
||||
.ok();
|
||||
.get_room_power_levels(&room_id)
|
||||
.await;
|
||||
|
||||
let user_can_demote_self = room_power_levels
|
||||
.as_ref()
|
||||
.is_some_and(|power_levels_content| {
|
||||
RoomPowerLevels::from(power_levels_content.clone())
|
||||
.user_can_change_user_power_level(&user_id, &user_id)
|
||||
}) || self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
|
||||
.await
|
||||
.is_ok_and(|event| event.sender() == user_id);
|
||||
let user_can_demote_self =
|
||||
room_power_levels.user_can_change_user_power_level(&user_id, &user_id);
|
||||
|
||||
if !user_can_demote_self {
|
||||
return Err!("User is not allowed to modify their own power levels in the room.",);
|
||||
}
|
||||
|
||||
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
||||
power_levels_content.users.remove(&user_id);
|
||||
room_power_levels.users.remove(&user_id);
|
||||
|
||||
let event_id = self
|
||||
.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(String::new(), &power_levels_content),
|
||||
PartialPdu::state(
|
||||
String::new(),
|
||||
&RoomPowerLevelsEventContent::try_from(room_power_levels)
|
||||
.expect("PLs should be valid for room version"),
|
||||
),
|
||||
&user_id,
|
||||
Some(&room_id),
|
||||
&state_lock,
|
||||
@@ -793,7 +860,7 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
self.write_str(&format!("{user_id} has been granted admin privileges.",))
|
||||
self.write_str(&format!("{user_id} has been granted admin privileges."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -811,9 +878,7 @@ pub(super) async fn put_room_tag(
|
||||
.account_data
|
||||
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
|
||||
.await
|
||||
.unwrap_or(TagEvent {
|
||||
content: TagEventContent { tags: BTreeMap::new() },
|
||||
});
|
||||
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
|
||||
|
||||
tags_event
|
||||
.content
|
||||
@@ -850,9 +915,7 @@ pub(super) async fn delete_room_tag(
|
||||
.account_data
|
||||
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
|
||||
.await
|
||||
.unwrap_or(TagEvent {
|
||||
content: TagEventContent { tags: BTreeMap::new() },
|
||||
});
|
||||
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
|
||||
|
||||
tags_event.content.tags.remove(&tag.clone().into());
|
||||
|
||||
@@ -882,9 +945,7 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId)
|
||||
.account_data
|
||||
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
|
||||
.await
|
||||
.unwrap_or(TagEvent {
|
||||
content: TagEventContent { tags: BTreeMap::new() },
|
||||
});
|
||||
.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
|
||||
|
||||
self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
|
||||
.await
|
||||
@@ -921,19 +982,19 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
||||
.rooms
|
||||
.state
|
||||
.mutex
|
||||
.lock(&event.room_id_or_hash())
|
||||
.lock(event.room_id_or_hash().as_str())
|
||||
.await;
|
||||
|
||||
self.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
PartialPdu {
|
||||
redacts: Some(event.event_id().to_owned()),
|
||||
..PduBuilder::timeline(&RoomRedactionEventContent {
|
||||
..PartialPdu::timeline(&assign!(RoomRedactionEventContent::new_v1(), {
|
||||
redacts: Some(event.event_id().to_owned()),
|
||||
reason: Some(reason),
|
||||
})
|
||||
}))
|
||||
},
|
||||
event.sender(),
|
||||
Some(&event.room_id_or_hash()),
|
||||
@@ -963,7 +1024,7 @@ pub(super) async fn force_leave_remote_room(
|
||||
.resolve_with_servers(
|
||||
&room_id,
|
||||
if let Some(v) = via.clone() {
|
||||
Some(vec![OwnedServerName::parse(v)?])
|
||||
Some(vec![ServerName::parse(v)?])
|
||||
} else {
|
||||
None
|
||||
},
|
||||
@@ -976,7 +1037,7 @@ pub(super) async fn force_leave_remote_room(
|
||||
);
|
||||
let mut vias: HashSet<OwnedServerName> = HashSet::new();
|
||||
if let Some(via) = via {
|
||||
vias.insert(OwnedServerName::parse(via)?);
|
||||
vias.insert(ServerName::parse(via)?);
|
||||
}
|
||||
for server in vias_raw {
|
||||
vias.insert(server);
|
||||
@@ -1051,7 +1112,12 @@ pub(super) async fn logout(&self, user_id: String) -> Result {
|
||||
self.services
|
||||
.users
|
||||
.all_device_ids(&user_id)
|
||||
.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
|
||||
.for_each(async |device_id| {
|
||||
self.services
|
||||
.users
|
||||
.remove_device(&user_id, &device_id)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
self.write_str(&format!("User {user_id} has been logged out from all devices."))
|
||||
.await
|
||||
@@ -1129,11 +1195,9 @@ pub(super) async fn get_user_by_email(&self, email: String) -> Result {
|
||||
|
||||
match self.services.threepid.get_localpart_for_email(&email).await {
|
||||
| Some(localpart) => {
|
||||
let user_id = OwnedUserId::parse(format!(
|
||||
"@{localpart}:{}",
|
||||
self.services.globals.server_name()
|
||||
))
|
||||
.unwrap();
|
||||
let user_id =
|
||||
UserId::parse(format!("@{localpart}:{}", self.services.globals.server_name()))
|
||||
.unwrap();
|
||||
|
||||
self.write_str(&format!("{email} belongs to {user_id}."))
|
||||
.await
|
||||
|
||||
@@ -160,6 +160,17 @@ pub enum UserCommand {
|
||||
#[clap(alias = "list")]
|
||||
ListUsers,
|
||||
|
||||
/// Lists all the rooms (local and remote) that the specified user is
|
||||
/// invited to
|
||||
ListInvitedRooms {
|
||||
user_id: String,
|
||||
},
|
||||
|
||||
/// Manually make a user reject all current invites
|
||||
RejectAllInvites {
|
||||
user_id: String,
|
||||
},
|
||||
|
||||
/// Lists all the rooms (local and remote) that the specified user is
|
||||
/// joined in
|
||||
ListJoinedRooms {
|
||||
|
||||
+1
-1
@@ -48,7 +48,7 @@ pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result<
|
||||
Ok(user_id)
|
||||
}
|
||||
|
||||
/// Parses user ID that is an active (not guest or deactivated) local user
|
||||
/// Parses user ID that is an active (not deactivated) local user
|
||||
pub(crate) async fn parse_active_local_user_id(
|
||||
services: &Services,
|
||||
user_id: &str,
|
||||
|
||||
+3
-3
@@ -48,9 +48,6 @@ jemalloc_stats = [
|
||||
"conduwuit-core/jemalloc_stats",
|
||||
"conduwuit-service/jemalloc_stats",
|
||||
]
|
||||
ldap = [
|
||||
"conduwuit-service/ldap"
|
||||
]
|
||||
release_max_log_level = [
|
||||
"conduwuit-core/release_max_log_level",
|
||||
"conduwuit-service/release_max_log_level",
|
||||
@@ -77,6 +74,7 @@ conduwuit-macros.workspace = true
|
||||
conduwuit-service.workspace = true
|
||||
const-str.workspace = true
|
||||
ctor.workspace = true
|
||||
dtor.workspace = true
|
||||
futures.workspace = true
|
||||
hmac.workspace = true
|
||||
http.workspace = true
|
||||
@@ -88,7 +86,9 @@ lettre.workspace = true
|
||||
log.workspace = true
|
||||
rand.workspace = true
|
||||
reqwest.workspace = true
|
||||
assign.workspace = true
|
||||
ruma.workspace = true
|
||||
ruminuwuity.workspace = true
|
||||
serde_html_form.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, info, utils::ReadyExt, warn};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use ruma::{
|
||||
OwnedRoomAliasId, continuwuity_admin_api::rooms,
|
||||
events::room::message::RoomMessageEventContent,
|
||||
};
|
||||
use ruma::{OwnedRoomAliasId, events::room::message::RoomMessageEventContent};
|
||||
use ruminuwuity::admin::continuwuity::rooms;
|
||||
|
||||
use crate::{Ruma, client::leave_room};
|
||||
|
||||
@@ -36,7 +34,6 @@ pub(crate) async fn ban_room(
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&body.room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.ready_filter(|user| services.globals.user_is_local(user))
|
||||
.boxed();
|
||||
let mut evicted = Vec::new();
|
||||
@@ -63,9 +60,9 @@ pub(crate) async fn ban_room(
|
||||
.rooms
|
||||
.alias
|
||||
.local_aliases_for_room(&body.room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
for alias in &aliases {
|
||||
info!("Removing alias {} for banned room {}", alias, body.room_id);
|
||||
services
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result};
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomId, continuwuity_admin_api::rooms};
|
||||
use ruma::OwnedRoomId;
|
||||
use ruminuwuity::admin::continuwuity::rooms;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
@@ -22,8 +23,8 @@ pub(crate) async fn list_rooms(
|
||||
.metadata
|
||||
.iter_ids()
|
||||
.filter_map(|room_id| async move {
|
||||
if !services.rooms.metadata.is_banned(room_id).await {
|
||||
Some(room_id.to_owned())
|
||||
if !services.rooms.metadata.is_banned(&room_id).await {
|
||||
Some(room_id.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{
|
||||
Err, Event, Result, err, info,
|
||||
pdu::PduBuilder,
|
||||
Err, Result, err, info,
|
||||
pdu::PartialPdu,
|
||||
utils::{ReadyExt, stream::BroadbandExt},
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use lettre::{Address, message::Mailbox};
|
||||
use ruma::{
|
||||
OwnedRoomId, OwnedUserId, UserId,
|
||||
OwnedRoomId, UserId,
|
||||
api::client::{
|
||||
account::{
|
||||
ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity,
|
||||
@@ -18,17 +18,15 @@
|
||||
},
|
||||
uiaa::{AuthFlow, AuthType},
|
||||
},
|
||||
events::{
|
||||
StateEventType,
|
||||
room::{
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||
},
|
||||
assign,
|
||||
events::room::{
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
power_levels::RoomPowerLevelsEventContent,
|
||||
},
|
||||
};
|
||||
use service::{mailer::messages, uiaa::Identity};
|
||||
use service::{mailer::messages, uiaa::Identity, users::HashedPassword};
|
||||
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||
use crate::Ruma;
|
||||
|
||||
pub(crate) mod register;
|
||||
@@ -87,7 +85,7 @@ pub(crate) async fn get_register_available_route(
|
||||
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
|
||||
}
|
||||
|
||||
Ok(get_username_availability::v3::Response { available: true })
|
||||
Ok(get_username_availability::v3::Response::new(true))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/account/password`
|
||||
@@ -143,7 +141,7 @@ pub(crate) async fn change_password_route(
|
||||
.await?
|
||||
};
|
||||
|
||||
let sender_user = OwnedUserId::parse(format!(
|
||||
let sender_user = UserId::parse(format!(
|
||||
"@{}:{}",
|
||||
identity.localpart.expect("localpart should be known"),
|
||||
services.globals.server_name()
|
||||
@@ -152,8 +150,7 @@ pub(crate) async fn change_password_route(
|
||||
|
||||
services
|
||||
.users
|
||||
.set_password(&sender_user, Some(&body.new_password))
|
||||
.await?;
|
||||
.set_password(&sender_user, Some(HashedPassword::new(&body.new_password)?));
|
||||
|
||||
if body.logout_devices {
|
||||
// Logout all devices except the current one
|
||||
@@ -161,7 +158,7 @@ pub(crate) async fn change_password_route(
|
||||
.users
|
||||
.all_device_ids(&sender_user)
|
||||
.ready_filter(|id| *id != body.sender_device())
|
||||
.for_each(|id| services.users.remove_device(&sender_user, id))
|
||||
.for_each(async |id| services.users.remove_device(&sender_user, &id).await)
|
||||
.await;
|
||||
|
||||
// Remove all pushers except the ones associated with this session
|
||||
@@ -175,8 +172,8 @@ pub(crate) async fn change_password_route(
|
||||
.get_pusher_device(&pushkey)
|
||||
.await
|
||||
.ok()
|
||||
.filter(|pusher_device| pusher_device != body.sender_device())
|
||||
.is_some()
|
||||
.as_ref()
|
||||
.is_some_and(|pusher_device| pusher_device != body.sender_device())
|
||||
.then_some(pushkey)
|
||||
})
|
||||
.for_each(async |pushkey| {
|
||||
@@ -194,7 +191,7 @@ pub(crate) async fn change_password_route(
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(change_password::v3::Response {})
|
||||
Ok(change_password::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/v3/account/password/email/requestToken`
|
||||
@@ -215,7 +212,7 @@ pub(crate) async fn request_password_change_token_via_email_route(
|
||||
};
|
||||
|
||||
let user_id =
|
||||
OwnedUserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
|
||||
UserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
|
||||
let display_name = services.users.displayname(&user_id).await.ok();
|
||||
|
||||
let session = services
|
||||
@@ -241,21 +238,12 @@ pub(crate) async fn request_password_change_token_via_email_route(
|
||||
///
|
||||
/// Note: Also works for Application Services
|
||||
pub(crate) async fn whoami_route(
|
||||
State(services): State<crate::State>,
|
||||
State(_): State<crate::State>,
|
||||
body: Ruma<whoami::v3::Request>,
|
||||
) -> Result<whoami::v3::Response> {
|
||||
let is_guest = services
|
||||
.users
|
||||
.is_deactivated(body.sender_user())
|
||||
.await
|
||||
.map_err(|_| {
|
||||
err!(Request(Forbidden("Application service has not registered this user.")))
|
||||
})? && body.appservice_info.is_none();
|
||||
Ok(whoami::v3::Response {
|
||||
user_id: body.sender_user().to_owned(),
|
||||
device_id: body.sender_device.clone(),
|
||||
is_guest,
|
||||
})
|
||||
Ok(assign!(whoami::v3::Response::new(body.sender_user().to_owned(), false), {
|
||||
device_id: body.sender_device,
|
||||
}))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/account/deactivate`
|
||||
@@ -310,9 +298,7 @@ pub(crate) async fn deactivate_route(
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(deactivate::v3::Response {
|
||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
|
||||
})
|
||||
Ok(deactivate::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/register/m.login.registration_token/validity`
|
||||
@@ -330,14 +316,12 @@ pub(crate) async fn check_registration_token_validity(
|
||||
.await
|
||||
.is_some();
|
||||
|
||||
Ok(check_registration_token_validity::v1::Response { valid })
|
||||
Ok(check_registration_token_validity::v1::Response::new(valid))
|
||||
}
|
||||
|
||||
/// Runs through all the deactivation steps:
|
||||
///
|
||||
/// - Mark as deactivated
|
||||
/// - Removing display name
|
||||
/// - Removing avatar URL and blurhash
|
||||
/// - Removing all profile data
|
||||
/// - Leaving all rooms (and forgets all of them)
|
||||
pub async fn full_user_deactivate(
|
||||
@@ -354,13 +338,7 @@ pub async fn full_user_deactivate(
|
||||
.await;
|
||||
}
|
||||
|
||||
services
|
||||
.users
|
||||
.all_profile_keys(user_id)
|
||||
.ready_for_each(|(profile_key, _)| {
|
||||
services.users.set_profile_key(user_id, &profile_key, None);
|
||||
})
|
||||
.await;
|
||||
services.users.clear_profile(user_id).await;
|
||||
|
||||
services
|
||||
.pusher
|
||||
@@ -372,62 +350,49 @@ pub async fn full_user_deactivate(
|
||||
|
||||
// TODO: Rescind all user invites
|
||||
|
||||
let mut pdu_queue: Vec<(PduBuilder, &OwnedRoomId)> = Vec::new();
|
||||
let mut pdu_queue: Vec<(PartialPdu, &OwnedRoomId)> = Vec::new();
|
||||
|
||||
for room_id in all_joined_rooms {
|
||||
let room_power_levels = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content::<RoomPowerLevelsEventContent>(
|
||||
room_id,
|
||||
&StateEventType::RoomPowerLevels,
|
||||
"",
|
||||
)
|
||||
.await
|
||||
.ok();
|
||||
.get_room_power_levels(room_id)
|
||||
.await;
|
||||
|
||||
let user_can_demote_self =
|
||||
room_power_levels
|
||||
.as_ref()
|
||||
.is_some_and(|power_levels_content| {
|
||||
RoomPowerLevels::from(power_levels_content.clone())
|
||||
.user_can_change_user_power_level(user_id, user_id)
|
||||
}) || services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(room_id, &StateEventType::RoomCreate, "")
|
||||
.await
|
||||
.is_ok_and(|event| event.sender() == user_id);
|
||||
room_power_levels.user_can_change_user_power_level(user_id, user_id);
|
||||
|
||||
if user_can_demote_self {
|
||||
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
||||
if user_can_demote_self
|
||||
&& let Ok(mut power_levels_content) =
|
||||
RoomPowerLevelsEventContent::try_from(room_power_levels)
|
||||
{
|
||||
power_levels_content.users.remove(user_id);
|
||||
let pl_evt = PduBuilder::state(String::new(), &power_levels_content);
|
||||
let pl_evt = PartialPdu::state(String::new(), &power_levels_content);
|
||||
pdu_queue.push((pl_evt, room_id));
|
||||
}
|
||||
|
||||
// Leave the room
|
||||
pdu_queue.push((
|
||||
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
avatar_url: None,
|
||||
blurhash: None,
|
||||
membership: MembershipState::Leave,
|
||||
displayname: None,
|
||||
join_authorized_via_users_server: None,
|
||||
reason: None,
|
||||
is_direct: None,
|
||||
third_party_invite: None,
|
||||
redact_events: None,
|
||||
}),
|
||||
PartialPdu::state(
|
||||
user_id.to_string(),
|
||||
&RoomMemberEventContent::new(MembershipState::Leave),
|
||||
),
|
||||
room_id,
|
||||
));
|
||||
|
||||
// TODO: Redact all messages sent by the user in the room
|
||||
}
|
||||
|
||||
super::update_all_rooms(services, pdu_queue, user_id)
|
||||
.boxed()
|
||||
.await;
|
||||
for (pdu, room_id) in pdu_queue {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
|
||||
|
||||
let _ = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(pdu, user_id, Some(room_id.as_ref()), &state_lock)
|
||||
.await;
|
||||
}
|
||||
|
||||
for room_id in all_joined_rooms {
|
||||
services.rooms.state_cache.forget(room_id, user_id);
|
||||
}
|
||||
|
||||
@@ -10,23 +10,26 @@
|
||||
use conduwuit_service::Services;
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use lettre::{Address, message::Mailbox};
|
||||
use register::RegistrationKind;
|
||||
use ruma::{
|
||||
OwnedUserId, UserId,
|
||||
api::client::{
|
||||
account::{
|
||||
register::{self, LoginType},
|
||||
register::{self, LoginType, RegistrationKind},
|
||||
request_registration_token_via_email,
|
||||
},
|
||||
uiaa::{AuthFlow, AuthType},
|
||||
},
|
||||
events::{GlobalAccountDataEventType, room::message::RoomMessageEventContent},
|
||||
assign,
|
||||
events::{
|
||||
GlobalAccountDataEventType, push_rules::PushRulesEvent,
|
||||
room::message::RoomMessageEventContent,
|
||||
},
|
||||
push,
|
||||
};
|
||||
use serde_json::value::RawValue;
|
||||
use service::mailer::messages;
|
||||
use service::{mailer::messages, users::HashedPassword};
|
||||
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||
use crate::Ruma;
|
||||
|
||||
const RANDOM_USER_ID_LENGTH: usize = 10;
|
||||
@@ -38,16 +41,6 @@
|
||||
/// You can use [`GET
|
||||
/// /_matrix/client/v3/register/available`](fn.get_register_available_route.
|
||||
/// html) to check if the user id is valid and available.
|
||||
///
|
||||
/// - Only works if registration is enabled
|
||||
/// - If type is guest: ignores all parameters except
|
||||
/// initial_device_display_name
|
||||
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
|
||||
/// - If type is not guest and no username is given: Always fails after UIAA
|
||||
/// check
|
||||
/// - Creates a new account and populates it with default account data
|
||||
/// - If `inhibit_login` is false: Creates a device and returns device id and
|
||||
/// access_token
|
||||
#[allow(clippy::doc_markdown)]
|
||||
#[tracing::instrument(skip_all, fields(%client), name = "register", level = "info")]
|
||||
pub(crate) async fn register_route(
|
||||
@@ -55,7 +48,10 @@ pub(crate) async fn register_route(
|
||||
ClientIp(client): ClientIp,
|
||||
body: Ruma<register::v3::Request>,
|
||||
) -> Result<register::v3::Response> {
|
||||
let is_guest = body.kind == RegistrationKind::Guest;
|
||||
if body.kind != RegistrationKind::User {
|
||||
return Err!(Request(GuestAccessForbidden("Guests may not register on this server.")));
|
||||
}
|
||||
|
||||
let emergency_mode_enabled = services.config.emergency_password.is_some();
|
||||
|
||||
// Allow registration if it's enabled in the config file or if this is the first
|
||||
@@ -64,69 +60,19 @@ pub(crate) async fn register_route(
|
||||
services.config.allow_registration || services.firstrun.is_first_run();
|
||||
|
||||
if !allow_registration && body.appservice_info.is_none() {
|
||||
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
|
||||
| (Some(username), Some(device_display_name)) => {
|
||||
info!(
|
||||
%is_guest,
|
||||
user = %username,
|
||||
device_name = %device_display_name,
|
||||
"Rejecting registration attempt as registration is disabled"
|
||||
);
|
||||
},
|
||||
| (Some(username), _) => {
|
||||
info!(
|
||||
%is_guest,
|
||||
user = %username,
|
||||
"Rejecting registration attempt as registration is disabled"
|
||||
);
|
||||
},
|
||||
| (_, Some(device_display_name)) => {
|
||||
info!(
|
||||
%is_guest,
|
||||
device_name = %device_display_name,
|
||||
"Rejecting registration attempt as registration is disabled"
|
||||
);
|
||||
},
|
||||
| (None, _) => {
|
||||
info!(
|
||||
%is_guest,
|
||||
"Rejecting registration attempt as registration is disabled"
|
||||
);
|
||||
},
|
||||
}
|
||||
|
||||
return Err!(Request(Forbidden(
|
||||
"This server is not accepting registrations at this time."
|
||||
)));
|
||||
}
|
||||
|
||||
if is_guest && !services.config.allow_guest_registration {
|
||||
info!(
|
||||
"Guest registration disabled, rejecting guest registration attempt, initial device \
|
||||
name: \"{}\"",
|
||||
body.initial_device_display_name.as_deref().unwrap_or("")
|
||||
?body.username,
|
||||
?body.initial_device_display_name,
|
||||
"Rejecting registration attempt as registration is disabled"
|
||||
);
|
||||
return Err!(Request(GuestAccessForbidden("Guest registration is disabled.")));
|
||||
}
|
||||
|
||||
// forbid guests from registering if there is not a real admin user yet. give
|
||||
// generic user error.
|
||||
if is_guest && services.firstrun.is_first_run() {
|
||||
warn!(
|
||||
"Guest account attempted to register before a real admin user has been registered, \
|
||||
rejecting registration. Guest's initial device name: \"{}\"",
|
||||
body.initial_device_display_name.as_deref().unwrap_or("")
|
||||
);
|
||||
return Err!(Request(Forbidden(
|
||||
"This server is not accepting registrations at this time."
|
||||
)));
|
||||
}
|
||||
|
||||
// Appeservices and guests get to skip auth
|
||||
let skip_auth = body.appservice_info.is_some() || is_guest;
|
||||
|
||||
let identity = if skip_auth {
|
||||
// Appservices and guests have no identity
|
||||
let identity = if body.appservice_info.is_some() {
|
||||
// Appservices can skip auth
|
||||
None
|
||||
} else {
|
||||
// Perform UIAA to determine the user's identity
|
||||
@@ -153,13 +99,9 @@ pub(crate) async fn register_route(
|
||||
}
|
||||
});
|
||||
|
||||
let user_id = determine_registration_user_id(
|
||||
&services,
|
||||
supplied_username,
|
||||
is_guest,
|
||||
emergency_mode_enabled,
|
||||
)
|
||||
.await?;
|
||||
let user_id =
|
||||
determine_registration_user_id(&services, supplied_username, emergency_mode_enabled)
|
||||
.await?;
|
||||
|
||||
if body.body.login_type == Some(LoginType::ApplicationService) {
|
||||
// For appservice logins, make sure that the user ID is in the appservice's
|
||||
@@ -183,10 +125,16 @@ pub(crate) async fn register_route(
|
||||
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
|
||||
}
|
||||
|
||||
let password = if is_guest { None } else { body.password.as_deref() };
|
||||
let password = if body.appservice_info.is_some() {
|
||||
None
|
||||
} else if let Some(password) = body.password.as_deref() {
|
||||
Some(HashedPassword::new(password)?)
|
||||
} else {
|
||||
return Err!(Request(InvalidParam("A password must be provided")));
|
||||
};
|
||||
|
||||
// Create user
|
||||
services.users.create(&user_id, password, None).await?;
|
||||
services.users.create(&user_id, password).await?;
|
||||
|
||||
// Set an initial display name
|
||||
let mut displayname = user_id.localpart().to_owned();
|
||||
@@ -209,24 +157,18 @@ pub(crate) async fn register_route(
|
||||
None,
|
||||
&user_id,
|
||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
||||
content: ruma::events::push_rules::PushRulesEventContent {
|
||||
global: push::Ruleset::server_default(&user_id),
|
||||
},
|
||||
})?,
|
||||
&serde_json::to_value(PushRulesEvent::new(
|
||||
push::Ruleset::server_default(&user_id).into(),
|
||||
))
|
||||
.expect("should be able to serialize push rules"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Generate new device id if the user didn't specify one
|
||||
let no_device = body.inhibit_login
|
||||
|| body
|
||||
.appservice_info
|
||||
.as_ref()
|
||||
.is_some_and(|aps| aps.registration.device_management);
|
||||
|
||||
let (token, device) = if !no_device {
|
||||
// Don't create a device for inhibited logins
|
||||
let device_id = if is_guest { None } else { body.device_id.clone() }
|
||||
let (token, device) = if !body.inhibit_login {
|
||||
let device_id = body
|
||||
.device_id
|
||||
.clone()
|
||||
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
||||
|
||||
// Generate new token for the device
|
||||
@@ -243,12 +185,14 @@ pub(crate) async fn register_route(
|
||||
Some(client.to_string()),
|
||||
)
|
||||
.await?;
|
||||
debug_info!(%user_id, %device_id, "User account was created");
|
||||
(Some(new_token), Some(device_id))
|
||||
} else {
|
||||
// Don't create a device for inhibited logins
|
||||
(None, None)
|
||||
};
|
||||
|
||||
debug_info!(%user_id, ?device, "User account was created");
|
||||
|
||||
// If the user registered with an email, associate it with their account.
|
||||
if let Some(identity) = identity
|
||||
&& let Some(email) = identity.email
|
||||
@@ -265,8 +209,7 @@ pub(crate) async fn register_route(
|
||||
|
||||
let device_display_name = body.initial_device_display_name.as_deref().unwrap_or("");
|
||||
|
||||
// log in conduit admin channel if a non-guest user registered
|
||||
if body.appservice_info.is_none() && !is_guest {
|
||||
if body.appservice_info.is_none() {
|
||||
if !device_display_name.is_empty() {
|
||||
let notice = format!(
|
||||
"New user \"{user_id}\" registered on this server from IP {client} and device \
|
||||
@@ -287,65 +230,32 @@ pub(crate) async fn register_route(
|
||||
}
|
||||
}
|
||||
|
||||
// log in conduit admin channel if a guest registered
|
||||
if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations {
|
||||
debug_info!("New guest user \"{user_id}\" registered on this server.");
|
||||
// Make the first user to register an administrator and disable first-run mode.
|
||||
let was_first_user = services.firstrun.empower_first_user(&user_id).await?;
|
||||
|
||||
if !device_display_name.is_empty() {
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.admin
|
||||
.notice(&format!(
|
||||
"Guest user \"{user_id}\" with device display name \
|
||||
\"{device_display_name}\" registered on this server from IP {client}"
|
||||
))
|
||||
.await;
|
||||
}
|
||||
} else {
|
||||
#[allow(clippy::collapsible_else_if)]
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.admin
|
||||
.notice(&format!(
|
||||
"Guest user \"{user_id}\" with no device display name registered on \
|
||||
this server from IP {client}",
|
||||
))
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !is_guest {
|
||||
// Make the first user to register an administrator and disable first-run mode.
|
||||
let was_first_user = services.firstrun.empower_first_user(&user_id).await?;
|
||||
|
||||
// If the registering user was not the first and we're suspending users on
|
||||
// register, suspend them.
|
||||
if !was_first_user && services.config.suspend_on_register {
|
||||
// Note that we can still do auto joins for suspended users
|
||||
// If the registering user was not the first and we're suspending users on
|
||||
// register, suspend them.
|
||||
if !was_first_user && services.config.suspend_on_register {
|
||||
// Note that we can still do auto joins for suspended users
|
||||
services
|
||||
.users
|
||||
.suspend_account(&user_id, &services.globals.server_user)
|
||||
.await;
|
||||
// And send an @room notice to the admin room, to prompt admins to review the
|
||||
// new user and ideally unsuspend them if deemed appropriate.
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.users
|
||||
.suspend_account(&user_id, &services.globals.server_user)
|
||||
.await;
|
||||
// And send an @room notice to the admin room, to prompt admins to review the
|
||||
// new user and ideally unsuspend them if deemed appropriate.
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.admin
|
||||
.send_loud_message(RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} has been suspended as they are not the first user on \
|
||||
this server. Please review and unsuspend them if appropriate."
|
||||
)))
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
.admin
|
||||
.send_loud_message(RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} has been suspended as they are not the first user on this \
|
||||
server. Please review and unsuspend them if appropriate."
|
||||
)))
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
|
||||
if body.appservice_info.is_none()
|
||||
&& !services.server.config.auto_join_rooms.is_empty()
|
||||
&& (services.config.allow_guests_auto_join_rooms || !is_guest)
|
||||
{
|
||||
if body.appservice_info.is_none() && !services.server.config.auto_join_rooms.is_empty() {
|
||||
for room in &services.server.config.auto_join_rooms {
|
||||
let Ok(room_id) = services.rooms.alias.resolve(room).await else {
|
||||
error!(
|
||||
@@ -368,16 +278,17 @@ pub(crate) async fn register_route(
|
||||
}
|
||||
|
||||
if let Some(room_server_name) = room.server_name() {
|
||||
match join_room_by_id_helper(
|
||||
&services,
|
||||
&user_id,
|
||||
&room_id,
|
||||
Some("Automatically joining this room upon registration".to_owned()),
|
||||
&[services.globals.server_name().to_owned(), room_server_name.to_owned()],
|
||||
&body.appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
.await
|
||||
match services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(
|
||||
&user_id,
|
||||
&room_id,
|
||||
Some("Automatically joining this room upon registration".to_owned()),
|
||||
&[services.globals.server_name().to_owned(), room_server_name.to_owned()],
|
||||
)
|
||||
.boxed()
|
||||
.await
|
||||
{
|
||||
| Err(e) => {
|
||||
// don't return this error so we don't fail registrations
|
||||
@@ -393,13 +304,12 @@ pub(crate) async fn register_route(
|
||||
}
|
||||
}
|
||||
|
||||
Ok(register::v3::Response {
|
||||
Ok(assign!(register::v3::Response::new(user_id), {
|
||||
access_token: token,
|
||||
user_id,
|
||||
device_id: device,
|
||||
refresh_token: None,
|
||||
expires_in: None,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
/// Determine which flows and parameters should be presented when
|
||||
@@ -514,12 +424,9 @@ async fn create_registration_uiaa_session(
|
||||
async fn determine_registration_user_id(
|
||||
services: &Services,
|
||||
supplied_username: Option<String>,
|
||||
is_guest: bool,
|
||||
emergency_mode_enabled: bool,
|
||||
) -> Result<OwnedUserId> {
|
||||
if let Some(supplied_username) = supplied_username
|
||||
&& !is_guest
|
||||
{
|
||||
if let Some(supplied_username) = supplied_username {
|
||||
// The user gets to pick their username. Do some validation to make sure it's
|
||||
// acceptable.
|
||||
|
||||
@@ -572,7 +479,7 @@ async fn determine_registration_user_id(
|
||||
|
||||
Ok(user_id)
|
||||
} else {
|
||||
// The user is a guest or didn't specify a username. Generate a username for
|
||||
// The user didn't specify a username. Generate a username for
|
||||
// them.
|
||||
|
||||
loop {
|
||||
|
||||
@@ -141,9 +141,7 @@ pub(crate) async fn delete_3pid_route(
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
if body.medium != Medium::Email {
|
||||
return Ok(delete_3pid::v3::Response {
|
||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
||||
});
|
||||
return Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::NoSupport));
|
||||
}
|
||||
|
||||
if !services.threepid.email_requirement().may_remove() {
|
||||
@@ -159,7 +157,5 @@ pub(crate) async fn delete_3pid_route(
|
||||
return Err!(Request(ThreepidNotFound("Your account has no associated email.")));
|
||||
}
|
||||
|
||||
Ok(delete_3pid::v3::Response {
|
||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
|
||||
})
|
||||
Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
|
||||
}
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
get_global_account_data, get_room_account_data, set_global_account_data,
|
||||
set_room_account_data,
|
||||
},
|
||||
events::{
|
||||
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
|
||||
RoomAccountDataEventType,
|
||||
},
|
||||
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
|
||||
serde::Raw,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
@@ -40,7 +37,7 @@ pub(crate) async fn set_global_account_data_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(set_global_account_data::v3::Response {})
|
||||
Ok(set_global_account_data::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
||||
@@ -65,7 +62,7 @@ pub(crate) async fn set_room_account_data_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(set_room_account_data::v3::Response {})
|
||||
Ok(set_room_account_data::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
|
||||
@@ -87,7 +84,7 @@ pub(crate) async fn get_global_account_data_route(
|
||||
.await
|
||||
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
|
||||
|
||||
Ok(get_global_account_data::v3::Response { account_data: account_data.content })
|
||||
Ok(get_global_account_data::v3::Response::new(account_data.content))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
||||
@@ -109,7 +106,7 @@ pub(crate) async fn get_room_account_data_route(
|
||||
.await
|
||||
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
|
||||
|
||||
Ok(get_room_account_data::v3::Response { account_data: account_data.content })
|
||||
Ok(get_room_account_data::v3::Response::new(account_data.content))
|
||||
}
|
||||
|
||||
async fn set_account_data(
|
||||
@@ -119,7 +116,7 @@ async fn set_account_data(
|
||||
event_type_s: &str,
|
||||
data: &RawJsonValue,
|
||||
) -> Result {
|
||||
if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() {
|
||||
if event_type_s == "m.fully_read" {
|
||||
return Err!(Request(BadJson(
|
||||
"This endpoint cannot be used for marking a room as fully read (setting \
|
||||
m.fully_read)"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result};
|
||||
use futures::future::{join, join3};
|
||||
use ruma::api::client::admin::{get_suspended, set_suspended};
|
||||
use ruminuwuity::admin::{get_suspended, set_suspended};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, err};
|
||||
use ruma::api::{appservice::ping, client::appservice::request_ping};
|
||||
use ruma::{
|
||||
api::{appservice::ping, client::appservice::request_ping},
|
||||
assign,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
@@ -40,12 +43,12 @@ pub(crate) async fn appservice_ping(
|
||||
.sending
|
||||
.send_appservice_request(
|
||||
appservice_info.registration.clone(),
|
||||
ping::send_ping::v1::Request {
|
||||
assign!(ping::send_ping::v1::Request::new(), {
|
||||
transaction_id: body.transaction_id.clone(),
|
||||
},
|
||||
}),
|
||||
)
|
||||
.await?
|
||||
.expect("We already validated if an appservice URL exists above");
|
||||
|
||||
Ok(request_ping::v1::Response { duration: timer.elapsed() })
|
||||
Ok(request_ping::v1::Response::new(timer.elapsed()))
|
||||
}
|
||||
|
||||
+29
-33
@@ -3,7 +3,6 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, err};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{FutureExt, future::try_join};
|
||||
use ruma::{
|
||||
UInt, UserId,
|
||||
api::client::backup::{
|
||||
@@ -28,7 +27,7 @@ pub(crate) async fn create_backup_version_route(
|
||||
.key_backups
|
||||
.create_backup(body.sender_user(), &body.algorithm)?;
|
||||
|
||||
Ok(create_backup_version::v3::Response { version })
|
||||
Ok(create_backup_version::v3::Response::new(version))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
||||
@@ -44,7 +43,7 @@ pub(crate) async fn update_backup_version_route(
|
||||
.update_backup(body.sender_user(), &body.version, &body.algorithm)
|
||||
.await?;
|
||||
|
||||
Ok(update_backup_version::v3::Response {})
|
||||
Ok(update_backup_version::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
||||
@@ -60,9 +59,9 @@ pub(crate) async fn get_latest_backup_info_route(
|
||||
.await
|
||||
.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await;
|
||||
|
||||
Ok(get_latest_backup_info::v3::Response { algorithm, count, etag, version })
|
||||
Ok(get_latest_backup_info::v3::Response::new(algorithm, count, etag, version))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v3/room_keys/version/{version}`
|
||||
@@ -80,14 +79,9 @@ pub(crate) async fn get_backup_info_route(
|
||||
err!(Request(NotFound("Key backup does not exist at version {:?}", body.version)))
|
||||
})?;
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(get_backup_info::v3::Response {
|
||||
algorithm,
|
||||
count,
|
||||
etag,
|
||||
version: body.version.clone(),
|
||||
})
|
||||
Ok(get_backup_info::v3::Response::new(algorithm, count, etag, body.version.clone()))
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
||||
@@ -105,7 +99,7 @@ pub(crate) async fn delete_backup_version_route(
|
||||
.delete_backup(body.sender_user(), &body.version)
|
||||
.await;
|
||||
|
||||
Ok(delete_backup_version::v3::Response {})
|
||||
Ok(delete_backup_version::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
||||
@@ -140,9 +134,9 @@ pub(crate) async fn add_backup_keys_route(
|
||||
}
|
||||
}
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(add_backup_keys::v3::Response { count, etag })
|
||||
Ok(add_backup_keys::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||
@@ -175,9 +169,9 @@ pub(crate) async fn add_backup_keys_for_room_route(
|
||||
.await?;
|
||||
}
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(add_backup_keys_for_room::v3::Response { count, etag })
|
||||
Ok(add_backup_keys_for_room::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||
@@ -275,9 +269,9 @@ pub(crate) async fn add_backup_keys_for_session_route(
|
||||
.await?;
|
||||
}
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(add_backup_keys_for_session::v3::Response { count, etag })
|
||||
Ok(add_backup_keys_for_session::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/room_keys/keys`
|
||||
@@ -292,7 +286,7 @@ pub(crate) async fn get_backup_keys_route(
|
||||
.get_all(body.sender_user(), &body.version)
|
||||
.await;
|
||||
|
||||
Ok(get_backup_keys::v3::Response { rooms })
|
||||
Ok(get_backup_keys::v3::Response::new(rooms))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||
@@ -307,7 +301,7 @@ pub(crate) async fn get_backup_keys_for_room_route(
|
||||
.get_room(body.sender_user(), &body.version, &body.room_id)
|
||||
.await;
|
||||
|
||||
Ok(get_backup_keys_for_room::v3::Response { sessions })
|
||||
Ok(get_backup_keys_for_room::v3::Response::new(sessions))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||
@@ -325,7 +319,7 @@ pub(crate) async fn get_backup_keys_for_session_route(
|
||||
err!(Request(NotFound(debug_error!("Backup key not found for this user's session."))))
|
||||
})?;
|
||||
|
||||
Ok(get_backup_keys_for_session::v3::Response { key_data })
|
||||
Ok(get_backup_keys_for_session::v3::Response::new(key_data))
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
||||
@@ -340,9 +334,9 @@ pub(crate) async fn delete_backup_keys_route(
|
||||
.delete_all_keys(body.sender_user(), &body.version)
|
||||
.await;
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(delete_backup_keys::v3::Response { count, etag })
|
||||
Ok(delete_backup_keys::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||
@@ -357,9 +351,9 @@ pub(crate) async fn delete_backup_keys_for_room_route(
|
||||
.delete_room_keys(body.sender_user(), &body.version, &body.room_id)
|
||||
.await;
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(delete_backup_keys_for_room::v3::Response { count, etag })
|
||||
Ok(delete_backup_keys_for_room::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||
@@ -374,22 +368,24 @@ pub(crate) async fn delete_backup_keys_for_session_route(
|
||||
.delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id)
|
||||
.await;
|
||||
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;
|
||||
|
||||
Ok(delete_backup_keys_for_session::v3::Response { count, etag })
|
||||
Ok(delete_backup_keys_for_session::v3::Response::new(etag, count))
|
||||
}
|
||||
|
||||
async fn get_count_etag(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
version: &str,
|
||||
) -> Result<(UInt, String)> {
|
||||
let count = services
|
||||
) -> (UInt, String) {
|
||||
let count: UInt = services
|
||||
.key_backups
|
||||
.count_keys(sender_user, version)
|
||||
.map(TryInto::try_into);
|
||||
.await
|
||||
.try_into()
|
||||
.expect("number of keys should fit into a UInt");
|
||||
|
||||
let etag = services.key_backups.get_etag(sender_user, version).map(Ok);
|
||||
let etag = services.key_backups.get_etag(sender_user, version).await;
|
||||
|
||||
Ok(try_join(count, etag).await?)
|
||||
(count, etag)
|
||||
}
|
||||
|
||||
@@ -5,8 +5,11 @@
|
||||
use ruma::{
|
||||
RoomVersionId,
|
||||
api::client::discovery::get_capabilities::{
|
||||
self, Capabilities, GetLoginTokenCapability, RoomVersionStability,
|
||||
RoomVersionsCapability, ThirdPartyIdChangesCapability,
|
||||
self,
|
||||
v3::{
|
||||
Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability,
|
||||
ThirdPartyIdChangesCapability,
|
||||
},
|
||||
},
|
||||
};
|
||||
use serde_json::json;
|
||||
@@ -25,19 +28,17 @@ pub(crate) async fn get_capabilities_route(
|
||||
Server::available_room_versions().collect();
|
||||
|
||||
let mut capabilities = Capabilities::default();
|
||||
capabilities.room_versions = RoomVersionsCapability {
|
||||
capabilities.room_versions = RoomVersionsCapability::new(
|
||||
services.server.config.default_room_version.clone(),
|
||||
available,
|
||||
default: services.server.config.default_room_version.clone(),
|
||||
};
|
||||
);
|
||||
|
||||
// Only allow 3pid changes if SMTP is configured
|
||||
capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability {
|
||||
enabled: services.threepid.email_requirement().may_change(),
|
||||
};
|
||||
capabilities.thirdparty_id_changes =
|
||||
ThirdPartyIdChangesCapability::new(services.threepid.email_requirement().may_change());
|
||||
|
||||
capabilities.get_login_token = GetLoginTokenCapability {
|
||||
enabled: services.server.config.login_via_existing_session,
|
||||
};
|
||||
capabilities.get_login_token =
|
||||
GetLoginTokenCapability::new(services.server.config.login_via_existing_session);
|
||||
|
||||
// MSC4133 capability
|
||||
capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?;
|
||||
@@ -56,5 +57,5 @@ pub(crate) async fn get_capabilities_route(
|
||||
capabilities.set("uk.timedout.msc4323", json!({"suspend": true, "lock": false}))?;
|
||||
}
|
||||
|
||||
Ok(get_capabilities::v3::Response { capabilities })
|
||||
Ok(get_capabilities::v3::Response::new(capabilities))
|
||||
}
|
||||
|
||||
@@ -12,7 +12,9 @@
|
||||
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
|
||||
future::{OptionFuture, join, join3, try_join3},
|
||||
};
|
||||
use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType};
|
||||
use ruma::{
|
||||
OwnedEventId, UserId, api::client::context::get_context, assign, events::StateEventType,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Ruma,
|
||||
@@ -213,7 +215,7 @@ pub(crate) async fn get_context_route(
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(get_context::v3::Response {
|
||||
Ok(assign!(get_context::v3::Response::new(), {
|
||||
event: base_event.map(at!(1)).map(Event::into_format),
|
||||
|
||||
start: events_before
|
||||
@@ -243,5 +245,5 @@ pub(crate) async fn get_context_route(
|
||||
.collect(),
|
||||
|
||||
state,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -2,10 +2,14 @@
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{Err, Result, at};
|
||||
use futures::StreamExt;
|
||||
use ruma::api::client::dehydrated_device::{
|
||||
delete_dehydrated_device::unstable as delete_dehydrated_device,
|
||||
get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
|
||||
put_dehydrated_device::unstable as put_dehydrated_device,
|
||||
use ruma::{
|
||||
api::client::dehydrated_device::{
|
||||
delete_dehydrated_device::unstable as delete_dehydrated_device,
|
||||
get_dehydrated_device::unstable as get_dehydrated_device,
|
||||
get_events::unstable as get_events,
|
||||
put_dehydrated_device::unstable as put_dehydrated_device,
|
||||
},
|
||||
assign,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
@@ -33,7 +37,7 @@ pub(crate) async fn put_dehydrated_device_route(
|
||||
.set_dehydrated_device(sender_user, body.body)
|
||||
.await?;
|
||||
|
||||
Ok(put_dehydrated_device::Response { device_id })
|
||||
Ok(put_dehydrated_device::Response::new(device_id))
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/../dehydrated_device`
|
||||
@@ -51,7 +55,7 @@ pub(crate) async fn delete_dehydrated_device_route(
|
||||
|
||||
services.users.remove_device(sender_user, &device_id).await;
|
||||
|
||||
Ok(delete_dehydrated_device::Response { device_id })
|
||||
Ok(delete_dehydrated_device::Response::new(device_id))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/../dehydrated_device`
|
||||
@@ -67,10 +71,7 @@ pub(crate) async fn get_dehydrated_device_route(
|
||||
|
||||
let device = services.users.get_dehydrated_device(sender_user).await?;
|
||||
|
||||
Ok(get_dehydrated_device::Response {
|
||||
device_id: device.device_id,
|
||||
device_data: device.device_data,
|
||||
})
|
||||
Ok(get_dehydrated_device::Response::new(device.device_id, device.device_data))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
|
||||
@@ -114,8 +115,7 @@ pub(crate) async fn get_dehydrated_events_route(
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(get_events::Response {
|
||||
events,
|
||||
Ok(assign!(get_events::Response::new(events), {
|
||||
next_batch: next_batch.as_ref().map(ToString::to_string),
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
+23
-46
@@ -25,7 +25,7 @@ pub(crate) async fn get_devices_route(
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(get_devices::v3::Response { devices })
|
||||
Ok(get_devices::v3::Response::new(devices))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/devices/{deviceId}`
|
||||
@@ -41,7 +41,7 @@ pub(crate) async fn get_device_route(
|
||||
.await
|
||||
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
|
||||
|
||||
Ok(get_device::v3::Response { device })
|
||||
Ok(get_device::v3::Response::new(device))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
||||
@@ -73,19 +73,16 @@ pub(crate) async fn update_device_route(
|
||||
.update_device_metadata(sender_user, &body.device_id, &device)
|
||||
.await?;
|
||||
|
||||
Ok(update_device::v3::Response {})
|
||||
Ok(update_device::v3::Response::new())
|
||||
},
|
||||
| Err(_) => {
|
||||
let Some(appservice) = appservice else {
|
||||
return Err!(Request(NotFound("Device not found.")));
|
||||
};
|
||||
if !appservice.registration.device_management {
|
||||
return Err!(Request(NotFound("Device not found.")));
|
||||
}
|
||||
|
||||
debug!(
|
||||
"Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \
|
||||
and device ID does not exist",
|
||||
"Creating new device for {sender_user} from appservice {} as device ID does not \
|
||||
exist",
|
||||
appservice.registration.id
|
||||
);
|
||||
|
||||
@@ -102,7 +99,7 @@ pub(crate) async fn update_device_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(update_device::v3::Response {});
|
||||
return Ok(update_device::v3::Response::new());
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -124,39 +121,28 @@ pub(crate) async fn delete_device_route(
|
||||
let sender_user = body.sender_user();
|
||||
let appservice = body.appservice_info.as_ref();
|
||||
|
||||
if appservice.is_some_and(|appservice| appservice.registration.device_management) {
|
||||
debug!(
|
||||
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
|
||||
enabled"
|
||||
);
|
||||
services
|
||||
.users
|
||||
.remove_device(sender_user, &body.device_id)
|
||||
.await;
|
||||
|
||||
return Ok(delete_device::v3::Response {});
|
||||
// Appservices get to skip UIAA for this endpoint
|
||||
if appservice.is_none() {
|
||||
// Prompt the user to confirm with their password using UIAA
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Prompt the user to confirm with their password using UIAA
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
|
||||
services
|
||||
.users
|
||||
.remove_device(sender_user, &body.device_id)
|
||||
.await;
|
||||
|
||||
Ok(delete_device::v3::Response {})
|
||||
Ok(delete_device::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/v3/delete_devices`
|
||||
///
|
||||
/// Deletes the given list of devices.
|
||||
///
|
||||
/// - Requires UIAA to verify user password unless from an appservice with
|
||||
/// MSC4190 enabled.
|
||||
/// - Requires UIAA to verify user password.
|
||||
///
|
||||
/// For each device:
|
||||
/// - Invalidates access token
|
||||
@@ -171,27 +157,18 @@ pub(crate) async fn delete_devices_route(
|
||||
let sender_user = body.sender_user();
|
||||
let appservice = body.appservice_info.as_ref();
|
||||
|
||||
if appservice.is_some_and(|appservice| appservice.registration.device_management) {
|
||||
debug!(
|
||||
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
|
||||
enabled"
|
||||
);
|
||||
for device_id in &body.devices {
|
||||
services.users.remove_device(sender_user, device_id).await;
|
||||
}
|
||||
|
||||
return Ok(delete_devices::v3::Response {});
|
||||
// Appservices get to skip UIAA for this endpoint
|
||||
if appservice.is_none() {
|
||||
// Prompt the user to confirm with their password using UIAA
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Prompt the user to confirm with their password using UIAA
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
|
||||
for device_id in &body.devices {
|
||||
services.users.remove_device(sender_user, device_id).await;
|
||||
}
|
||||
|
||||
Ok(delete_devices::v3::Response {})
|
||||
Ok(delete_devices::v3::Response::new())
|
||||
}
|
||||
|
||||
+50
-150
@@ -1,23 +1,16 @@
|
||||
use std::iter::once;
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{
|
||||
Err, Event, Result, RoomVersion, err, info,
|
||||
Err, Result, err, info,
|
||||
utils::{
|
||||
TryFutureExtExt,
|
||||
math::Expected,
|
||||
result::FlatOk,
|
||||
stream::{ReadyExt, WidebandExt},
|
||||
},
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{
|
||||
FutureExt, StreamExt, TryFutureExt,
|
||||
future::{join, join4, join5},
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
OwnedRoomId, RoomId, ServerName, UInt, UserId,
|
||||
RoomId, ServerName, UInt, UserId,
|
||||
api::{
|
||||
client::{
|
||||
directory::{
|
||||
@@ -28,15 +21,9 @@
|
||||
},
|
||||
federation,
|
||||
},
|
||||
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
|
||||
events::{
|
||||
StateEventType,
|
||||
room::{
|
||||
create::RoomCreateEventContent,
|
||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
||||
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||
},
|
||||
},
|
||||
assign,
|
||||
directory::{Filter, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
|
||||
events::StateEventType,
|
||||
uint,
|
||||
};
|
||||
use tokio::join;
|
||||
@@ -109,12 +96,11 @@ pub(crate) async fn get_public_rooms_route(
|
||||
err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
|
||||
})?;
|
||||
|
||||
Ok(get_public_rooms::v3::Response {
|
||||
chunk: response.chunk,
|
||||
Ok(assign!(get_public_rooms::v3::Response::new(response.chunk), {
|
||||
prev_batch: response.prev_batch,
|
||||
next_batch: response.next_batch,
|
||||
total_room_count_estimate: response.total_room_count_estimate,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
|
||||
@@ -136,16 +122,6 @@ pub(crate) async fn set_room_visibility_route(
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
if services
|
||||
.users
|
||||
.is_deactivated(sender_user)
|
||||
.await
|
||||
.unwrap_or(false)
|
||||
&& body.appservice_info.is_none()
|
||||
{
|
||||
return Err!(Request(Forbidden("Guests cannot publish to room directories")));
|
||||
}
|
||||
|
||||
if !user_can_publish_room(&services, sender_user, &body.room_id).await? {
|
||||
return Err!(Request(Forbidden("User is not allowed to publish this room")));
|
||||
}
|
||||
@@ -197,7 +173,7 @@ pub(crate) async fn set_room_visibility_route(
|
||||
},
|
||||
}
|
||||
|
||||
Ok(set_room_visibility::v3::Response {})
|
||||
Ok(set_room_visibility::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
|
||||
@@ -212,13 +188,13 @@ pub(crate) async fn get_room_visibility_route(
|
||||
return Err!(Request(NotFound("Room not found")));
|
||||
}
|
||||
|
||||
Ok(get_room_visibility::v3::Response {
|
||||
visibility: if services.rooms.directory.is_public_room(&body.room_id).await {
|
||||
room::Visibility::Public
|
||||
} else {
|
||||
room::Visibility::Private
|
||||
},
|
||||
})
|
||||
let visibility = if services.rooms.directory.is_public_room(&body.room_id).await {
|
||||
room::Visibility::Public
|
||||
} else {
|
||||
room::Visibility::Private
|
||||
};
|
||||
|
||||
Ok(get_room_visibility::v3::Response::new(visibility))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_public_rooms_filtered_helper(
|
||||
@@ -236,24 +212,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||
.sending
|
||||
.send_federation_request(
|
||||
other_server,
|
||||
federation::directory::get_public_rooms_filtered::v1::Request {
|
||||
assign!(federation::directory::get_public_rooms_filtered::v1::Request::new(), {
|
||||
limit,
|
||||
since: since.map(ToOwned::to_owned),
|
||||
filter: Filter {
|
||||
filter: assign!(Filter::new(), {
|
||||
generic_search_term: filter.generic_search_term.clone(),
|
||||
room_types: filter.room_types.clone(),
|
||||
},
|
||||
}),
|
||||
room_network: RoomNetwork::Matrix,
|
||||
},
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(get_public_rooms_filtered::v3::Response {
|
||||
return Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
|
||||
chunk: response.chunk,
|
||||
prev_batch: response.prev_batch,
|
||||
next_batch: response.next_batch,
|
||||
total_room_count_estimate: response.total_room_count_estimate,
|
||||
});
|
||||
}));
|
||||
}
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
@@ -284,16 +260,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||
.rooms
|
||||
.directory
|
||||
.public_rooms()
|
||||
.map(ToOwned::to_owned)
|
||||
.wide_then(|room_id| public_rooms_chunk(services, room_id))
|
||||
.ready_filter_map(|chunk| {
|
||||
.wide_then(async |room_id| {
|
||||
let summary = services
|
||||
.rooms
|
||||
.summary
|
||||
.build_local_room_summary(&room_id)
|
||||
.await
|
||||
.expect("room in public room directory should exist");
|
||||
|
||||
summary.into()
|
||||
})
|
||||
.ready_filter_map(|chunk: PublicRoomsChunk| {
|
||||
if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) {
|
||||
return None;
|
||||
}
|
||||
|
||||
if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
|
||||
if let Some(name) = &chunk.name {
|
||||
if name.as_str().to_lowercase().contains(&query) {
|
||||
if name.to_lowercase().contains(&query) {
|
||||
return Some(chunk);
|
||||
}
|
||||
}
|
||||
@@ -320,7 +304,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
|
||||
all_rooms.sort_by_key(|r| std::cmp::Reverse(r.num_joined_members));
|
||||
|
||||
let total_room_count_estimate = UInt::try_from(all_rooms.len())
|
||||
.unwrap_or_else(|_| uint!(0))
|
||||
@@ -335,12 +319,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||
.ge(&limit)
|
||||
.then_some(format!("n{}", num_since.expected_add(limit)));
|
||||
|
||||
Ok(get_public_rooms_filtered::v3::Response {
|
||||
Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
|
||||
chunk,
|
||||
prev_batch,
|
||||
next_batch,
|
||||
total_room_count_estimate,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
/// Checks whether the given user ID is allowed to publish the target room to
|
||||
@@ -356,109 +340,25 @@ async fn user_can_publish_room(
|
||||
// Server admins can always publish to their own room directory.
|
||||
return Ok(true);
|
||||
}
|
||||
let (create_event, room_version, power_levels_content) = join!(
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(room_id, &StateEventType::RoomCreate, ""),
|
||||
|
||||
let (room_version, room_creators, power_levels) = join!(
|
||||
services.rooms.state.get_room_version(room_id),
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content::<RoomPowerLevelsEventContent>(
|
||||
room_id,
|
||||
&StateEventType::RoomPowerLevels,
|
||||
""
|
||||
)
|
||||
services.rooms.state_accessor.get_room_creators(room_id),
|
||||
services.rooms.state_accessor.get_room_power_levels(room_id),
|
||||
);
|
||||
|
||||
let room_version = room_version
|
||||
.as_ref()
|
||||
.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
|
||||
let create_event = create_event.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
|
||||
if RoomVersion::new(room_version)
|
||||
.expect("room version must be supported")
|
||||
let room_version_rules = room_version.rules().unwrap();
|
||||
|
||||
if room_version_rules
|
||||
.authorization
|
||||
.explicitly_privilege_room_creators
|
||||
&& room_creators.contains(user_id)
|
||||
{
|
||||
let create_content: RoomCreateEventContent =
|
||||
serde_json::from_str(create_event.content().get())
|
||||
.map_err(|_| err!(Database("Invalid event content for m.room.create")))?;
|
||||
let is_creator = create_content
|
||||
.additional_creators
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.chain(once(create_event.sender().to_owned()))
|
||||
.any(|sender| sender == user_id);
|
||||
if is_creator {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
match power_levels_content.map(RoomPowerLevels::from) {
|
||||
| Ok(pl) => Ok(pl.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)),
|
||||
| Err(e) =>
|
||||
if e.is_not_found() {
|
||||
Ok(create_event.sender() == user_id)
|
||||
} else {
|
||||
Err!(Database("Invalid event content for m.room.power_levels: {e}"))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
|
||||
let name = services.rooms.state_accessor.get_name(&room_id).ok();
|
||||
|
||||
let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok();
|
||||
|
||||
let canonical_alias = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_canonical_alias(&room_id)
|
||||
.ok();
|
||||
|
||||
let avatar_url = services.rooms.state_accessor.get_avatar(&room_id);
|
||||
|
||||
let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok();
|
||||
|
||||
let world_readable = services.rooms.state_accessor.is_world_readable(&room_id);
|
||||
|
||||
let join_rule = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")
|
||||
.map_ok(|c: RoomJoinRulesEventContent| match c.join_rule {
|
||||
| JoinRule::Public => PublicRoomJoinRule::Public,
|
||||
| JoinRule::Knock => "knock".into(),
|
||||
| JoinRule::KnockRestricted(_) => "knock_restricted".into(),
|
||||
| _ => "invite".into(),
|
||||
});
|
||||
|
||||
let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id);
|
||||
|
||||
let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id);
|
||||
|
||||
let (
|
||||
(avatar_url, canonical_alias, guest_can_join, join_rule, name),
|
||||
(num_joined_members, room_type, topic, world_readable),
|
||||
) = join(
|
||||
join5(avatar_url, canonical_alias, guest_can_join, join_rule, name),
|
||||
join4(num_joined_members, room_type, topic, world_readable),
|
||||
)
|
||||
.boxed()
|
||||
.await;
|
||||
|
||||
PublicRoomsChunk {
|
||||
avatar_url: avatar_url.into_option().unwrap_or_default().url,
|
||||
canonical_alias,
|
||||
guest_can_join,
|
||||
join_rule: join_rule.unwrap_or_default(),
|
||||
name,
|
||||
num_joined_members: num_joined_members
|
||||
.map(TryInto::try_into)
|
||||
.map(Result::ok)
|
||||
.flat_ok()
|
||||
.unwrap_or_else(|| uint!(0)),
|
||||
room_id,
|
||||
room_type,
|
||||
topic,
|
||||
world_readable,
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
Ok(power_levels.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias))
|
||||
}
|
||||
|
||||
+99
-118
@@ -5,25 +5,23 @@
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Error, Result, debug, debug_warn, err,
|
||||
result::NotFound,
|
||||
utils::{IterStream, stream::WidebandExt},
|
||||
Err, Result, debug, debug_warn, err,
|
||||
result::FlatOk,
|
||||
utils::{IterStream, TryFutureExtExt, stream::WidebandExt},
|
||||
};
|
||||
use conduwuit_service::{Services, users::parse_master_key};
|
||||
use futures::{StreamExt, stream::FuturesUnordered};
|
||||
use ruma::{
|
||||
OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
|
||||
api::{
|
||||
client::{
|
||||
error::ErrorKind,
|
||||
keys::{
|
||||
claim_keys, get_key_changes, get_keys, upload_keys,
|
||||
upload_signatures::{self},
|
||||
upload_signing_keys,
|
||||
},
|
||||
client::keys::{
|
||||
claim_keys, get_key_changes, get_keys, upload_keys,
|
||||
upload_signatures::{self},
|
||||
upload_signing_keys,
|
||||
},
|
||||
federation,
|
||||
},
|
||||
assign,
|
||||
encryption::CrossSigningKey,
|
||||
serde::Raw,
|
||||
};
|
||||
@@ -66,6 +64,27 @@ pub(crate) async fn upload_keys_route(
|
||||
.await?;
|
||||
}
|
||||
|
||||
for (key_id, fallback_key) in &body.fallback_keys {
|
||||
if fallback_key
|
||||
.deserialize()
|
||||
.inspect_err(|e| {
|
||||
debug_warn!(
|
||||
%key_id,
|
||||
?fallback_key,
|
||||
"Invalid one time key JSON submitted by client, skipping: {e}"
|
||||
);
|
||||
})
|
||||
.is_err()
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
services
|
||||
.users
|
||||
.add_fallback_key(sender_user, sender_device, key_id, fallback_key, false)
|
||||
.await?;
|
||||
}
|
||||
|
||||
if let Some(device_keys) = &body.device_keys {
|
||||
let deser_device_keys = device_keys.deserialize().map_err(|e| {
|
||||
err!(Request(BadJson(debug_warn!(
|
||||
@@ -115,12 +134,12 @@ pub(crate) async fn upload_keys_route(
|
||||
}
|
||||
}
|
||||
|
||||
Ok(upload_keys::v3::Response {
|
||||
one_time_key_counts: services
|
||||
.users
|
||||
.count_one_time_keys(sender_user, sender_device)
|
||||
.await,
|
||||
})
|
||||
let one_time_key_counts = services
|
||||
.users
|
||||
.count_one_time_keys(sender_user, sender_device)
|
||||
.await;
|
||||
|
||||
Ok(upload_keys::v3::Response::new(one_time_key_counts))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/keys/query`
|
||||
@@ -174,7 +193,7 @@ pub(crate) async fn upload_signing_keys_route(
|
||||
) -> Result<upload_signing_keys::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
match check_for_new_keys(
|
||||
if uiaa_needed_to_upload_keys(
|
||||
services,
|
||||
sender_user,
|
||||
body.self_signing_key.as_ref(),
|
||||
@@ -182,25 +201,11 @@ pub(crate) async fn upload_signing_keys_route(
|
||||
body.master_key.as_ref(),
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| debug!(?e))
|
||||
{
|
||||
| Ok(exists) => {
|
||||
if let Some(result) = exists {
|
||||
// No-op, they tried to reupload the same set of keys
|
||||
// (lost connection for example)
|
||||
return Ok(result);
|
||||
}
|
||||
debug!(
|
||||
"Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"
|
||||
);
|
||||
// Some of the keys weren't found, so we let them upload
|
||||
},
|
||||
| _ => {
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
},
|
||||
let _ = services
|
||||
.uiaa
|
||||
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
|
||||
.await?;
|
||||
}
|
||||
|
||||
services
|
||||
@@ -214,77 +219,56 @@ pub(crate) async fn upload_signing_keys_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(upload_signing_keys::v3::Response {})
|
||||
Ok(upload_signing_keys::v3::Response::new())
|
||||
}
|
||||
|
||||
async fn check_for_new_keys(
|
||||
async fn uiaa_needed_to_upload_keys(
|
||||
services: crate::State,
|
||||
user_id: &UserId,
|
||||
self_signing_key: Option<&Raw<CrossSigningKey>>,
|
||||
user_signing_key: Option<&Raw<CrossSigningKey>>,
|
||||
master_signing_key: Option<&Raw<CrossSigningKey>>,
|
||||
) -> Result<Option<upload_signing_keys::v3::Response>> {
|
||||
debug!("checking for existing keys");
|
||||
let mut empty = false;
|
||||
if let Some(master_signing_key) = master_signing_key {
|
||||
let (key, value) = parse_master_key(user_id, master_signing_key)?;
|
||||
let result = services
|
||||
.users
|
||||
.get_master_key(None, user_id, &|_| true)
|
||||
.await;
|
||||
if result.is_not_found() {
|
||||
empty = true;
|
||||
} else {
|
||||
let existing_master_key = result?;
|
||||
let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?;
|
||||
if existing_key != key || existing_value != value {
|
||||
return Err!(Request(Forbidden(
|
||||
"Tried to change an existing master key, UIA required"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(user_signing_key) = user_signing_key {
|
||||
let key = services.users.get_user_signing_key(user_id).await;
|
||||
if key.is_not_found() && !empty {
|
||||
return Err!(Request(Forbidden(
|
||||
"Tried to update an existing user signing key, UIA required"
|
||||
)));
|
||||
}
|
||||
if !key.is_not_found() {
|
||||
let existing_signing_key = key?.deserialize()?;
|
||||
if existing_signing_key != user_signing_key.deserialize()? {
|
||||
return Err!(Request(Forbidden(
|
||||
"Tried to change an existing user signing key, UIA required"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(self_signing_key) = self_signing_key {
|
||||
let key = services
|
||||
) -> bool {
|
||||
let (self_signing_key, user_signing_key, master_signing_key) = (
|
||||
self_signing_key.map(Raw::deserialize).flat_ok(),
|
||||
user_signing_key.map(Raw::deserialize).flat_ok(),
|
||||
master_signing_key.map(Raw::deserialize).flat_ok(),
|
||||
);
|
||||
|
||||
let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = futures::join!(
|
||||
services
|
||||
.users
|
||||
.get_self_signing_key(None, user_id, &|_| true)
|
||||
.await;
|
||||
if key.is_not_found() && !empty {
|
||||
debug!(?key);
|
||||
return Err!(Request(Forbidden(
|
||||
"Tried to add a new signing key independently from the master key"
|
||||
)));
|
||||
}
|
||||
if !key.is_not_found() {
|
||||
let existing_signing_key = key?.deserialize()?;
|
||||
if existing_signing_key != self_signing_key.deserialize()? {
|
||||
return Err!(Request(Forbidden(
|
||||
"Tried to update an existing self signing key, UIA required"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
if empty {
|
||||
return Ok(None);
|
||||
}
|
||||
.ok(),
|
||||
services.users.get_user_signing_key(user_id).ok(),
|
||||
services.users.get_master_key(None, user_id, &|_| true).ok(),
|
||||
);
|
||||
|
||||
Ok(Some(upload_signing_keys::v3::Response {}))
|
||||
let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = (
|
||||
existing_self_signing_key
|
||||
.as_ref()
|
||||
.map(Raw::deserialize)
|
||||
.flat_ok(),
|
||||
existing_user_signing_key
|
||||
.as_ref()
|
||||
.map(Raw::deserialize)
|
||||
.flat_ok(),
|
||||
existing_master_signing_key
|
||||
.as_ref()
|
||||
.map(Raw::deserialize)
|
||||
.flat_ok(),
|
||||
);
|
||||
|
||||
if let Some(existing_master_signing_key) = existing_master_signing_key {
|
||||
// If a master key exists, UIAA is required if any of the keys are different.
|
||||
|
||||
master_signing_key != Some(existing_master_signing_key)
|
||||
|| user_signing_key != existing_user_signing_key
|
||||
|| self_signing_key != existing_self_signing_key
|
||||
} else {
|
||||
// If no master key exists, UIAA is not required.
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/keys/signatures/upload`
|
||||
@@ -343,7 +327,7 @@ pub(crate) async fn upload_signatures_route(
|
||||
}
|
||||
}
|
||||
|
||||
Ok(upload_signatures::v3::Response { failures: BTreeMap::new() })
|
||||
Ok(upload_signatures::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/keys/changes`
|
||||
@@ -363,18 +347,17 @@ pub(crate) async fn get_key_changes_route(
|
||||
let from = body
|
||||
.from
|
||||
.parse()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?;
|
||||
.map_err(|_| err!(Request(InvalidParam("Invalid `from`."))))?;
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.parse()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?;
|
||||
.map_err(|_| err!(Request(InvalidParam("Invalid `to`."))))?;
|
||||
|
||||
device_list_updates.extend(
|
||||
services
|
||||
.users
|
||||
.keys_changed(sender_user, Some(from), Some(to))
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
@@ -385,18 +368,18 @@ pub(crate) async fn get_key_changes_route(
|
||||
device_list_updates.extend(
|
||||
services
|
||||
.users
|
||||
.room_keys_changed(room_id, Some(from), Some(to))
|
||||
.room_keys_changed(&room_id, Some(from), Some(to))
|
||||
.map(|(user_id, _)| user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(get_key_changes::v3::Response {
|
||||
changed: device_list_updates.into_iter().collect(),
|
||||
left: Vec::new(), // TODO
|
||||
})
|
||||
Ok(get_key_changes::v3::Response::new(
|
||||
device_list_updates.into_iter().collect(),
|
||||
// TODO
|
||||
vec![],
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_keys_helper<F>(
|
||||
@@ -433,10 +416,10 @@ pub(crate) async fn get_keys_helper<F>(
|
||||
let mut devices = services.users.all_device_ids(user_id).boxed();
|
||||
|
||||
while let Some(device_id) = devices.next().await {
|
||||
if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await {
|
||||
if let Ok(mut keys) = services.users.get_device_keys(user_id, &device_id).await {
|
||||
let metadata = services
|
||||
.users
|
||||
.get_device_metadata(user_id, device_id)
|
||||
.get_device_metadata(user_id, &device_id)
|
||||
.await
|
||||
.map_err(|_| {
|
||||
err!(Database("all_device_keys contained nonexistent device."))
|
||||
@@ -445,7 +428,7 @@ pub(crate) async fn get_keys_helper<F>(
|
||||
add_unsigned_device_display_name(&mut keys, metadata, include_display_names)
|
||||
.map_err(|_| err!(Database("invalid device keys in database")))?;
|
||||
|
||||
container.insert(device_id.to_owned(), keys);
|
||||
container.insert(device_id.clone(), keys);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -506,8 +489,7 @@ pub(crate) async fn get_keys_helper<F>(
|
||||
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
|
||||
}
|
||||
|
||||
let request =
|
||||
federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
|
||||
let request = federation::keys::get_keys::v1::Request::new(device_keys_input_fed);
|
||||
let response = tokio::time::timeout(
|
||||
timeout,
|
||||
services.sending.send_federation_request(server, request),
|
||||
@@ -561,13 +543,13 @@ pub(crate) async fn get_keys_helper<F>(
|
||||
}
|
||||
}
|
||||
|
||||
Ok(get_keys::v3::Response {
|
||||
Ok(assign!(get_keys::v3::Response::new(), {
|
||||
failures,
|
||||
device_keys,
|
||||
master_keys,
|
||||
self_signing_keys,
|
||||
user_signing_keys,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
fn add_unsigned_device_display_name(
|
||||
@@ -576,7 +558,8 @@ fn add_unsigned_device_display_name(
|
||||
include_display_names: bool,
|
||||
) -> serde_json::Result<()> {
|
||||
if let Some(display_name) = metadata.display_name {
|
||||
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
|
||||
let mut object =
|
||||
keys.deserialize_as_unchecked::<serde_json::Map<String, serde_json::Value>>()?;
|
||||
|
||||
let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
|
||||
if let serde_json::Value::Object(unsigned_object) = unsigned {
|
||||
@@ -642,9 +625,7 @@ pub(crate) async fn claim_keys_helper(
|
||||
timeout,
|
||||
services.sending.send_federation_request(
|
||||
server,
|
||||
federation::keys::claim_keys::v1::Request {
|
||||
one_time_keys: one_time_keys_input_fed,
|
||||
},
|
||||
federation::keys::claim_keys::v1::Request::new(one_time_keys_input_fed),
|
||||
),
|
||||
)
|
||||
.await
|
||||
@@ -667,5 +648,5 @@ pub(crate) async fn claim_keys_helper(
|
||||
}
|
||||
}
|
||||
|
||||
Ok(claim_keys::v3::Response { failures, one_time_keys })
|
||||
Ok(assign!(claim_keys::v3::Response::new(one_time_keys), { failures: failures }))
|
||||
}
|
||||
|
||||
+34
-90
@@ -9,11 +9,11 @@
|
||||
use conduwuit_core::error;
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
|
||||
media::{Dim, FileMeta, MXC_LENGTH},
|
||||
};
|
||||
use reqwest::Url;
|
||||
use ruma::{
|
||||
Mxc, UserId,
|
||||
UserId,
|
||||
api::client::{
|
||||
authenticated_media::{
|
||||
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
|
||||
@@ -22,6 +22,7 @@
|
||||
media::create_content,
|
||||
},
|
||||
};
|
||||
use service::media::mxc::Mxc;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
@@ -30,9 +31,9 @@ pub(crate) async fn get_media_config_route(
|
||||
State(services): State<crate::State>,
|
||||
_body: Ruma<get_media_config::v1::Request>,
|
||||
) -> Result<get_media_config::v1::Response> {
|
||||
Ok(get_media_config::v1::Response {
|
||||
upload_size: ruma_from_usize(services.server.config.max_request_size),
|
||||
})
|
||||
Ok(get_media_config::v1::Response::new(ruma_from_usize(
|
||||
services.server.config.max_request_size,
|
||||
)))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/media/v3/upload`
|
||||
@@ -74,18 +75,7 @@ pub(crate) async fn create_content_route(
|
||||
return Err!(Request(Unknown("Failed to save uploaded media")));
|
||||
}
|
||||
|
||||
let blurhash = body.generate_blurhash.then(|| {
|
||||
services
|
||||
.media
|
||||
.create_blurhash(&body.file, content_type, filename)
|
||||
.ok()
|
||||
.flatten()
|
||||
});
|
||||
|
||||
Ok(create_content::v3::Response {
|
||||
content_uri: mxc.to_string().into(),
|
||||
blurhash: blurhash.flatten(),
|
||||
})
|
||||
Ok(create_content::v3::Response::new(mxc.to_string().into()))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
|
||||
@@ -114,7 +104,7 @@ pub(crate) async fn get_content_thumbnail_route(
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = match fetch_thumbnail(&services, &mxc, user, body.timeout_ms, &dim).await {
|
||||
} = match fetch_thumbnail_meta(&services, &mxc, user, body.timeout_ms, &dim).await {
|
||||
| Ok(meta) => meta,
|
||||
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||
| std::io::ErrorKind::NotFound =>
|
||||
@@ -128,13 +118,14 @@ pub(crate) async fn get_content_thumbnail_route(
|
||||
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching thumbnail."))),
|
||||
};
|
||||
|
||||
Ok(get_content_thumbnail::v1::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
let content_disposition =
|
||||
make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
|
||||
|
||||
Ok(get_content_thumbnail::v1::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
})
|
||||
))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
|
||||
@@ -161,7 +152,7 @@ pub(crate) async fn get_content_route(
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||
} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
|
||||
| Ok(meta) => meta,
|
||||
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||
@@ -174,13 +165,14 @@ pub(crate) async fn get_content_route(
|
||||
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
};
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
let content_disposition =
|
||||
make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
|
||||
|
||||
Ok(get_content::v1::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
})
|
||||
))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
|
||||
@@ -208,7 +200,7 @@ pub(crate) async fn get_content_as_filename_route(
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||
} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
|
||||
| Ok(meta) => meta,
|
||||
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||
@@ -221,13 +213,17 @@ pub(crate) async fn get_content_as_filename_route(
|
||||
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
};
|
||||
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
let content_disposition = make_content_disposition(
|
||||
content_disposition.as_ref(),
|
||||
content_type.as_deref(),
|
||||
Some(&body.filename),
|
||||
);
|
||||
|
||||
Ok(get_content_as_filename::v1::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
})
|
||||
))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/preview_url`
|
||||
@@ -278,58 +274,6 @@ pub(crate) async fn get_media_preview_route(
|
||||
})
|
||||
}
|
||||
|
||||
async fn fetch_thumbnail(
|
||||
services: &Services,
|
||||
mxc: &Mxc<'_>,
|
||||
user: &UserId,
|
||||
timeout_ms: Duration,
|
||||
dim: &Dim,
|
||||
) -> Result<FileMeta> {
|
||||
let FileMeta {
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = fetch_thumbnail_meta(services, mxc, user, timeout_ms, dim).await?;
|
||||
|
||||
let content_disposition = Some(make_content_disposition(
|
||||
content_disposition.as_ref(),
|
||||
content_type.as_deref(),
|
||||
None,
|
||||
));
|
||||
|
||||
Ok(FileMeta {
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
})
|
||||
}
|
||||
|
||||
async fn fetch_file(
|
||||
services: &Services,
|
||||
mxc: &Mxc<'_>,
|
||||
user: &UserId,
|
||||
timeout_ms: Duration,
|
||||
filename: Option<&str>,
|
||||
) -> Result<FileMeta> {
|
||||
let FileMeta {
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = fetch_file_meta(services, mxc, user, timeout_ms).await?;
|
||||
|
||||
let content_disposition = Some(make_content_disposition(
|
||||
content_disposition.as_ref(),
|
||||
content_type.as_deref(),
|
||||
filename,
|
||||
));
|
||||
|
||||
Ok(FileMeta {
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
})
|
||||
}
|
||||
|
||||
async fn fetch_thumbnail_meta(
|
||||
services: &Services,
|
||||
mxc: &Mxc<'_>,
|
||||
|
||||
@@ -6,15 +6,16 @@
|
||||
Err, Result, err,
|
||||
utils::{content_disposition::make_content_disposition, math::ruma_from_usize},
|
||||
};
|
||||
use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta};
|
||||
use conduwuit_service::media::{CORP_CROSS_ORIGIN, Dim, FileMeta};
|
||||
use reqwest::Url;
|
||||
use ruma::{
|
||||
Mxc,
|
||||
api::client::media::{
|
||||
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
||||
get_media_config, get_media_preview,
|
||||
},
|
||||
assign,
|
||||
};
|
||||
use service::media::mxc::Mxc;
|
||||
|
||||
use crate::{Ruma, RumaResponse, client::create_content_route};
|
||||
|
||||
@@ -25,9 +26,9 @@ pub(crate) async fn get_media_config_legacy_route(
|
||||
State(services): State<crate::State>,
|
||||
_body: Ruma<get_media_config::v3::Request>,
|
||||
) -> Result<get_media_config::v3::Response> {
|
||||
Ok(get_media_config::v3::Response {
|
||||
upload_size: ruma_from_usize(services.server.config.max_request_size),
|
||||
})
|
||||
Ok(get_media_config::v3::Response::new(ruma_from_usize(
|
||||
services.server.config.max_request_size,
|
||||
)))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/media/v1/config`
|
||||
@@ -153,13 +154,16 @@ pub(crate) async fn get_content_legacy_route(
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(get_content::v3::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
content_disposition: Some(content_disposition),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
})
|
||||
Ok(assign!(
|
||||
get_content::v3::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
}
|
||||
))
|
||||
},
|
||||
| _ =>
|
||||
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
|
||||
@@ -177,13 +181,16 @@ pub(crate) async fn get_content_legacy_route(
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(get_content::v3::Response {
|
||||
file: response.file,
|
||||
content_type: response.content_type,
|
||||
content_disposition: Some(content_disposition),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
})
|
||||
Ok(assign!(
|
||||
get_content::v3::Response::new(
|
||||
response.file,
|
||||
response.content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
}
|
||||
))
|
||||
} else {
|
||||
Err!(Request(NotFound("Media not found.")))
|
||||
},
|
||||
@@ -244,13 +251,15 @@ pub(crate) async fn get_content_as_filename_legacy_route(
|
||||
Some(&body.filename),
|
||||
);
|
||||
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
content_disposition: Some(content_disposition),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
})
|
||||
Ok(assign!(get_content_as_filename::v3::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
}
|
||||
))
|
||||
},
|
||||
| _ =>
|
||||
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
|
||||
@@ -268,13 +277,16 @@ pub(crate) async fn get_content_as_filename_legacy_route(
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
content_disposition: Some(content_disposition),
|
||||
content_type: response.content_type,
|
||||
file: response.file,
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
})
|
||||
Ok(assign!(
|
||||
get_content_as_filename::v3::Response::new(
|
||||
response.file,
|
||||
response.content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
}
|
||||
))
|
||||
} else {
|
||||
Err!(Request(NotFound("Media not found.")))
|
||||
},
|
||||
@@ -335,13 +347,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(get_content_thumbnail::v3::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
content_type: content_type.map(Into::into),
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
content_disposition: Some(content_disposition),
|
||||
})
|
||||
Ok(assign!(
|
||||
get_content_thumbnail::v3::Response::new(
|
||||
content.expect("entire file contents"),
|
||||
content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
|
||||
}
|
||||
))
|
||||
},
|
||||
| _ =>
|
||||
if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
|
||||
@@ -359,13 +374,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(get_content_thumbnail::v3::Response {
|
||||
file: response.file,
|
||||
content_type: response.content_type,
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
|
||||
cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
|
||||
content_disposition: Some(content_disposition),
|
||||
})
|
||||
Ok(assign!(
|
||||
get_content_thumbnail::v3::Response::new(
|
||||
response.file,
|
||||
response.content_type.unwrap_or_default(),
|
||||
content_disposition,
|
||||
),
|
||||
{
|
||||
cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
|
||||
}
|
||||
))
|
||||
} else {
|
||||
Err!(Request(NotFound("Media not found.")))
|
||||
},
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
|
||||
use ruma::{
|
||||
api::client::membership::ban_user,
|
||||
assign,
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
};
|
||||
|
||||
@@ -24,30 +25,19 @@ pub(crate) async fn ban_user_route(
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
||||
|
||||
let current_member_content = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(&body.room_id, &body.user_id)
|
||||
.await
|
||||
.unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban));
|
||||
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
reason: body.reason.clone(),
|
||||
displayname: None, // display name may be offensive
|
||||
avatar_url: None, // avatar may be offensive
|
||||
is_direct: None,
|
||||
join_authorized_via_users_server: None,
|
||||
third_party_invite: None,
|
||||
redact_events: body.redact_events,
|
||||
..current_member_content
|
||||
}),
|
||||
PartialPdu::state(
|
||||
body.user_id.to_string(),
|
||||
&assign!(RoomMemberEventContent::new(MembershipState::Ban), {
|
||||
reason: body.reason.clone(),
|
||||
redact_events: body.redact_events,
|
||||
}),
|
||||
),
|
||||
sender_user,
|
||||
Some(&body.room_id),
|
||||
&state_lock,
|
||||
|
||||
@@ -2,18 +2,19 @@
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, debug_error, err, info,
|
||||
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
|
||||
matrix::{event::gen_event_id_canonical_json, pdu::PartialPdu},
|
||||
warn,
|
||||
};
|
||||
use futures::FutureExt;
|
||||
use ruma::{
|
||||
RoomId, UserId,
|
||||
api::{client::membership::invite_user, federation::membership::create_invite},
|
||||
events::{
|
||||
invite_permission_config::FilterLevel,
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
api::{
|
||||
client::membership::invite_user::{self, v3::InviteUserId},
|
||||
federation::membership::create_invite,
|
||||
},
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
};
|
||||
use ruminuwuity::invite_permission_config::FilterLevel;
|
||||
use service::Services;
|
||||
|
||||
use super::banned_room_check;
|
||||
@@ -51,7 +52,11 @@ pub(crate) async fn invite_user_route(
|
||||
.await?;
|
||||
|
||||
match &body.recipient {
|
||||
| invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
|
||||
| invite_user::v3::InvitationRecipient::UserId(InviteUserId {
|
||||
user_id: recipient_user,
|
||||
reason,
|
||||
..
|
||||
}) => {
|
||||
let sender_filter_level = services
|
||||
.users
|
||||
.invite_filter_level(recipient_user, sender_user)
|
||||
@@ -59,7 +64,7 @@ pub(crate) async fn invite_user_route(
|
||||
|
||||
if !matches!(sender_filter_level, FilterLevel::Allow) {
|
||||
// drop invites if the sender has the recipient filtered
|
||||
return Ok(invite_user::v3::Response {});
|
||||
return Ok(invite_user::v3::Response::new());
|
||||
}
|
||||
|
||||
if let Ok(target_user_membership) = services
|
||||
@@ -95,13 +100,13 @@ pub(crate) async fn invite_user_route(
|
||||
sender_user,
|
||||
recipient_user,
|
||||
&body.room_id,
|
||||
body.reason.clone(),
|
||||
reason.clone(),
|
||||
false,
|
||||
)
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
Ok(invite_user::v3::Response {})
|
||||
Ok(invite_user::v3::Response::new())
|
||||
},
|
||||
| _ => {
|
||||
Err!(Request(NotFound("User not found.")))
|
||||
@@ -141,25 +146,28 @@ pub(crate) async fn invite_helper(
|
||||
let (pdu, pdu_json, invite_room_state) = {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
||||
let content = RoomMemberEventContent {
|
||||
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
|
||||
is_direct: Some(is_direct),
|
||||
reason,
|
||||
..RoomMemberEventContent::new(MembershipState::Invite)
|
||||
};
|
||||
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
|
||||
content.displayname = services.users.displayname(recipient_user).await.ok();
|
||||
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
|
||||
content.is_direct = Some(is_direct);
|
||||
content.reason = reason;
|
||||
|
||||
let (pdu, pdu_json) = services
|
||||
.rooms
|
||||
.timeline
|
||||
.create_hash_and_sign_event(
|
||||
PduBuilder::state(recipient_user.to_string(), &content),
|
||||
PartialPdu::state(recipient_user.to_string(), &content),
|
||||
sender_user,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
|
||||
let invite_room_state = services
|
||||
.rooms
|
||||
.state
|
||||
.summary_stripped(&pdu, room_id, recipient_user)
|
||||
.await;
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
@@ -168,32 +176,39 @@ pub(crate) async fn invite_helper(
|
||||
|
||||
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
|
||||
|
||||
let mut request = create_invite::v2::Request::new(
|
||||
room_id.to_owned(),
|
||||
(*pdu.event_id).to_owned(),
|
||||
room_version_id.clone(),
|
||||
services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(pdu_json.clone())
|
||||
.await,
|
||||
invite_room_state,
|
||||
);
|
||||
request.via = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_route_via(room_id)
|
||||
.await
|
||||
.ok();
|
||||
|
||||
let response = services
|
||||
.sending
|
||||
.send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: (*pdu.event_id).to_owned(),
|
||||
room_version: room_version_id.clone(),
|
||||
event: services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(pdu_json.clone())
|
||||
.await,
|
||||
invite_room_state,
|
||||
via: services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_route_via(room_id)
|
||||
.await
|
||||
.ok(),
|
||||
})
|
||||
.send_federation_request(recipient_user.server_name(), request)
|
||||
.await?;
|
||||
|
||||
// We do not add the event_id field to the pdu here because of signature and
|
||||
// hashes checks
|
||||
let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id)
|
||||
.map_err(|e| {
|
||||
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
|
||||
})?;
|
||||
let (event_id, value) = gen_event_id_canonical_json(
|
||||
&response.event,
|
||||
&room_version_id
|
||||
.rules()
|
||||
.expect("room version should have defined rules"),
|
||||
)
|
||||
.map_err(|e| {
|
||||
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
|
||||
})?;
|
||||
|
||||
if pdu.event_id != event_id {
|
||||
return Err!(Request(BadJson(warn!(
|
||||
@@ -229,20 +244,17 @@ pub(crate) async fn invite_helper(
|
||||
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
||||
let content = RoomMemberEventContent {
|
||||
displayname: services.users.displayname(recipient_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
|
||||
blurhash: services.users.blurhash(recipient_user).await.ok(),
|
||||
is_direct: Some(is_direct),
|
||||
reason,
|
||||
..RoomMemberEventContent::new(MembershipState::Invite)
|
||||
};
|
||||
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
|
||||
content.displayname = services.users.displayname(recipient_user).await.ok();
|
||||
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
|
||||
content.is_direct = Some(is_direct);
|
||||
content.reason = reason;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(recipient_user.to_string(), &content),
|
||||
PartialPdu::state(recipient_user.to_string(), &content),
|
||||
sender_user,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
|
||||
@@ -1,60 +1,18 @@
|
||||
use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, debug, debug_info, debug_warn, err, error, info, is_true,
|
||||
matrix::{
|
||||
StateKey,
|
||||
event::{gen_event_id, gen_event_id_canonical_json},
|
||||
pdu::{PduBuilder, PduEvent},
|
||||
state_res,
|
||||
},
|
||||
Err, Result, debug,
|
||||
result::FlatOk,
|
||||
trace,
|
||||
utils::{
|
||||
self, shuffle,
|
||||
stream::{IterStream, ReadyExt},
|
||||
to_canonical_object,
|
||||
},
|
||||
warn,
|
||||
utils::{shuffle, stream::IterStream},
|
||||
};
|
||||
use futures::{FutureExt, StreamExt, TryFutureExt};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
|
||||
RoomVersionId, UserId,
|
||||
api::{
|
||||
client::{
|
||||
error::ErrorKind,
|
||||
membership::{join_room_by_id, join_room_by_id_or_alias},
|
||||
},
|
||||
federation::{self},
|
||||
},
|
||||
canonical_json::to_canonical_value,
|
||||
events::{
|
||||
StateEventType,
|
||||
room::{
|
||||
join_rules::JoinRule,
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
},
|
||||
OwnedRoomId, OwnedServerName, OwnedUserId, UserId,
|
||||
api::client::membership::{join_room_by_id, join_room_by_id_or_alias},
|
||||
};
|
||||
use service::{
|
||||
Services,
|
||||
appservice::RegistrationInfo,
|
||||
rooms::{
|
||||
state::RoomMutexGuard,
|
||||
state_compressor::{CompressedState, HashSetCompressStateEvent},
|
||||
timeline::pdu_fits,
|
||||
},
|
||||
};
|
||||
use tokio::join;
|
||||
|
||||
use super::{banned_room_check, validate_remote_member_event_stub};
|
||||
use crate::{
|
||||
Ruma,
|
||||
server::{select_authorising_user, user_can_perform_restricted_join},
|
||||
};
|
||||
use super::banned_room_check;
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
|
||||
///
|
||||
@@ -89,7 +47,6 @@ pub(crate) async fn join_room_by_id_route(
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&body.room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
@@ -115,16 +72,14 @@ pub(crate) async fn join_room_by_id_route(
|
||||
shuffle(&mut servers);
|
||||
let servers = deprioritize(servers, &services.config.deprioritize_joins_through_servers);
|
||||
|
||||
join_room_by_id_helper(
|
||||
&services,
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
body.reason.clone(),
|
||||
&servers,
|
||||
&body.appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
.await
|
||||
let room_id = services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(sender_user, &body.room_id, body.reason.clone(), &servers)
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
Ok(join_room_by_id::v3::Response::new(room_id))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}`
|
||||
@@ -143,7 +98,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||
body: Ruma<join_room_by_id_or_alias::v3::Request>,
|
||||
) -> Result<join_room_by_id_or_alias::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
let appservice_info = &body.appservice_info;
|
||||
let body = &body.body;
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
@@ -169,7 +123,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
@@ -210,11 +163,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
let addl_via_servers = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)
|
||||
.map(ToOwned::to_owned);
|
||||
let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
|
||||
|
||||
let addl_state_servers = services
|
||||
.rooms
|
||||
@@ -227,7 +176,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||
.iter()
|
||||
.map(|event| event.get_field("sender"))
|
||||
.filter_map(FlatOk::flat_ok)
|
||||
.map(|user: &UserId| user.server_name().to_owned())
|
||||
.map(|user: OwnedUserId| user.server_name().to_owned())
|
||||
.stream()
|
||||
.chain(addl_via_servers)
|
||||
.collect()
|
||||
@@ -243,654 +192,14 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||
};
|
||||
|
||||
let servers = deprioritize(servers, &services.config.deprioritize_joins_through_servers);
|
||||
let join_room_response = join_room_by_id_helper(
|
||||
&services,
|
||||
sender_user,
|
||||
&room_id,
|
||||
body.reason.clone(),
|
||||
&servers,
|
||||
appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id })
|
||||
}
|
||||
|
||||
pub async fn join_room_by_id_helper(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
appservice_info: &Option<RegistrationInfo>,
|
||||
) -> Result<join_room_by_id::v3::Response> {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
||||
let user_is_guest = services
|
||||
.users
|
||||
.is_deactivated(sender_user)
|
||||
.await
|
||||
.unwrap_or(false)
|
||||
&& appservice_info.is_none();
|
||||
|
||||
if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await {
|
||||
return Err!(Request(Forbidden("Guests are not allowed to join this room")));
|
||||
}
|
||||
|
||||
if services
|
||||
let room_id = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, room_id)
|
||||
.await
|
||||
{
|
||||
debug_warn!("{sender_user} is already joined in {room_id}");
|
||||
return Ok(join_room_by_id::v3::Response { room_id: room_id.into() });
|
||||
}
|
||||
|
||||
if let Err(e) = services
|
||||
.antispam
|
||||
.user_may_join_room(
|
||||
sender_user.to_owned(),
|
||||
room_id.to_owned(),
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_invited(sender_user, room_id)
|
||||
.await,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!("Antispam prevented user {} from joining room {}: {}", sender_user, room_id, e);
|
||||
return Err!(Request(Forbidden("You are not allowed to join this room.")));
|
||||
}
|
||||
|
||||
let server_in_room = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.server_in_room(services.globals.server_name(), room_id)
|
||||
.await;
|
||||
|
||||
// Only check our known membership if we're already in the room.
|
||||
// See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855
|
||||
let membership = if server_in_room {
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(room_id, sender_user)
|
||||
.await
|
||||
} else {
|
||||
debug!("Ignoring local state for join {room_id}, we aren't in the room yet.");
|
||||
Ok(RoomMemberEventContent::new(MembershipState::Leave))
|
||||
};
|
||||
if let Ok(m) = membership {
|
||||
if m.membership == MembershipState::Ban {
|
||||
debug_warn!("{sender_user} is banned from {room_id} but attempted to join");
|
||||
// TODO: return reason
|
||||
return Err!(Request(Forbidden("You are banned from the room.")));
|
||||
}
|
||||
}
|
||||
|
||||
if !server_in_room && servers.is_empty() {
|
||||
return Err!(Request(NotFound(
|
||||
"No servers were provided to assist in joining the room remotely, and we are not \
|
||||
already participating in the room."
|
||||
)));
|
||||
}
|
||||
|
||||
if services.antispam.check_all_joins() {
|
||||
if let Err(e) = services
|
||||
.antispam
|
||||
.meowlnir_accept_make_join(room_id.to_owned(), sender_user.to_owned())
|
||||
.await
|
||||
{
|
||||
warn!("Antispam prevented user {} from joining room {}: {}", sender_user, room_id, e);
|
||||
return Err!(Request(Forbidden("Antispam rejected join request.")));
|
||||
}
|
||||
}
|
||||
|
||||
if server_in_room {
|
||||
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
||||
.boxed()
|
||||
.await?;
|
||||
} else {
|
||||
// Ask a remote server if we are not participating in this room
|
||||
join_room_by_id_helper_remote(
|
||||
services,
|
||||
sender_user,
|
||||
room_id,
|
||||
reason,
|
||||
servers,
|
||||
state_lock,
|
||||
)
|
||||
.membership
|
||||
.join_room(sender_user, &room_id, body.reason.clone(), &servers)
|
||||
.boxed()
|
||||
.await?;
|
||||
}
|
||||
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote", level = "info")]
|
||||
async fn join_room_by_id_helper_remote(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
state_lock: RoomMutexGuard,
|
||||
) -> Result {
|
||||
info!("Joining {room_id} over federation.");
|
||||
|
||||
let (make_join_response, remote_server) =
|
||||
make_join_request(services, sender_user, room_id, servers).await?;
|
||||
|
||||
info!("make_join finished");
|
||||
|
||||
let room_version_id = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
// How did we get here?
|
||||
return Err!(BadServerResponse(
|
||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
||||
));
|
||||
}
|
||||
|
||||
let mut join_event_stub: CanonicalJsonObject =
|
||||
serde_json::from_str(make_join_response.event.get()).map_err(|e| {
|
||||
err!(BadServerResponse(warn!(
|
||||
"Invalid make_join event json received from server: {e:?}"
|
||||
)))
|
||||
})?;
|
||||
|
||||
let join_authorized_via_users_server = {
|
||||
use RoomVersionId::*;
|
||||
if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
|
||||
join_event_stub
|
||||
.get("content")
|
||||
.map(|s| {
|
||||
s.as_object()?
|
||||
.get("join_authorised_via_users_server")?
|
||||
.as_str()
|
||||
})
|
||||
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
join_event_stub.insert(
|
||||
"origin_server_ts".to_owned(),
|
||||
CanonicalJsonValue::Integer(
|
||||
utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
),
|
||||
);
|
||||
join_event_stub.insert(
|
||||
"content".to_owned(),
|
||||
to_canonical_value(RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason,
|
||||
join_authorized_via_users_server: join_authorized_via_users_server.clone(),
|
||||
..RoomMemberEventContent::new(MembershipState::Join)
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
);
|
||||
|
||||
// We keep the "event_id" in the pdu only in v1 or
|
||||
// v2 rooms
|
||||
match room_version_id {
|
||||
| RoomVersionId::V1 | RoomVersionId::V2 => {},
|
||||
| _ => {
|
||||
join_event_stub.remove("event_id");
|
||||
},
|
||||
}
|
||||
|
||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
||||
// to be present
|
||||
services
|
||||
.server_keys
|
||||
.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
|
||||
|
||||
// Generate event id
|
||||
let event_id = gen_event_id(&join_event_stub, &room_version_id)?;
|
||||
|
||||
// Add event_id back
|
||||
join_event_stub
|
||||
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
|
||||
|
||||
// It has enough fields to be called a proper event now
|
||||
let mut join_event = join_event_stub;
|
||||
|
||||
info!("Asking {remote_server} for send_join in room {room_id}");
|
||||
let send_join_request = federation::membership::create_join_event::v2::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: event_id.clone(),
|
||||
omit_members: false,
|
||||
pdu: services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(join_event.clone())
|
||||
.await,
|
||||
};
|
||||
|
||||
let send_join_response = match services
|
||||
.sending
|
||||
.send_synapse_request(&remote_server, send_join_request)
|
||||
.await
|
||||
{
|
||||
| Ok(response) => response,
|
||||
| Err(e) => {
|
||||
error!("send_join failed: {e}");
|
||||
return Err(e);
|
||||
},
|
||||
};
|
||||
|
||||
info!("send_join finished");
|
||||
|
||||
if join_authorized_via_users_server.is_some() {
|
||||
if let Some(signed_raw) = &send_join_response.room_state.event {
|
||||
debug_info!(
|
||||
"There is a signed event with join_authorized_via_users_server. This room is \
|
||||
probably using restricted joins. Adding signature to our event"
|
||||
);
|
||||
|
||||
let (signed_event_id, signed_value) =
|
||||
gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| {
|
||||
err!(Request(BadJson(warn!(
|
||||
"Could not convert event to canonical JSON: {e}"
|
||||
))))
|
||||
})?;
|
||||
|
||||
if signed_event_id != event_id {
|
||||
return Err!(Request(BadJson(warn!(
|
||||
%signed_event_id, %event_id,
|
||||
"Server {remote_server} sent event with wrong event ID"
|
||||
))));
|
||||
}
|
||||
|
||||
match signed_value["signatures"]
|
||||
.as_object()
|
||||
.ok_or_else(|| {
|
||||
err!(BadServerResponse(warn!(
|
||||
"Server {remote_server} sent invalid signatures type"
|
||||
)))
|
||||
})
|
||||
.and_then(|e| {
|
||||
e.get(remote_server.as_str()).ok_or_else(|| {
|
||||
err!(BadServerResponse(warn!(
|
||||
"Server {remote_server} did not send its signature for a restricted \
|
||||
room"
|
||||
)))
|
||||
})
|
||||
}) {
|
||||
| Ok(signature) => {
|
||||
join_event
|
||||
.get_mut("signatures")
|
||||
.expect("we created a valid pdu")
|
||||
.as_object_mut()
|
||||
.expect("we created a valid pdu")
|
||||
.insert(remote_server.to_string(), signature.clone());
|
||||
},
|
||||
| Err(e) => {
|
||||
warn!(
|
||||
"Server {remote_server} sent invalid signature in send_join signatures \
|
||||
for event {signed_value:?}: {e:?}",
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
services
|
||||
.rooms
|
||||
.short
|
||||
.get_or_create_shortroomid(room_id)
|
||||
.await;
|
||||
|
||||
info!("Parsing join event");
|
||||
let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone())
|
||||
.map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?;
|
||||
|
||||
info!("Acquiring server signing keys for response events");
|
||||
let resp_events = &send_join_response.room_state;
|
||||
let resp_state = &resp_events.state;
|
||||
let resp_auth = &resp_events.auth_chain;
|
||||
services
|
||||
.server_keys
|
||||
.acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter()))
|
||||
.await;
|
||||
|
||||
info!("Going through send_join response room_state");
|
||||
let cork = services.db.cork_and_flush();
|
||||
let state = send_join_response
|
||||
.room_state
|
||||
.state
|
||||
.iter()
|
||||
.stream()
|
||||
.then(|pdu| {
|
||||
services
|
||||
.server_keys
|
||||
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
|
||||
.inspect_err(|e| {
|
||||
debug_warn!("Could not validate send_join response room_state event: {e:?}");
|
||||
})
|
||||
.inspect(|_| debug!("Completed validating send_join response room_state event"))
|
||||
})
|
||||
.ready_filter_map(Result::ok)
|
||||
.fold(HashMap::new(), |mut state, (event_id, value)| async move {
|
||||
let pdu = match PduEvent::from_id_val(&event_id, value.clone()) {
|
||||
| Ok(pdu) => pdu,
|
||||
| Err(e) => {
|
||||
debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}");
|
||||
return state;
|
||||
},
|
||||
};
|
||||
if !pdu_fits(&mut value.clone()) {
|
||||
warn!(
|
||||
"dropping incoming PDU {event_id} in room {room_id} from room join because \
|
||||
it exceeds 65535 bytes or is otherwise too large."
|
||||
);
|
||||
return state;
|
||||
}
|
||||
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
|
||||
if let Some(state_key) = &pdu.state_key {
|
||||
let shortstatekey = services
|
||||
.rooms
|
||||
.short
|
||||
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
|
||||
.await;
|
||||
|
||||
state.insert(shortstatekey, pdu.event_id.clone());
|
||||
}
|
||||
state
|
||||
})
|
||||
.await;
|
||||
|
||||
drop(cork);
|
||||
|
||||
info!("Going through send_join response auth_chain");
|
||||
let cork = services.db.cork_and_flush();
|
||||
send_join_response
|
||||
.room_state
|
||||
.auth_chain
|
||||
.iter()
|
||||
.stream()
|
||||
.then(|pdu| {
|
||||
services
|
||||
.server_keys
|
||||
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
|
||||
})
|
||||
.ready_filter_map(Result::ok)
|
||||
.ready_for_each(|(event_id, value)| {
|
||||
trace!(%event_id, "Adding PDU as an outlier from send_join auth_chain");
|
||||
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
|
||||
})
|
||||
.await;
|
||||
|
||||
drop(cork);
|
||||
|
||||
debug!("Running send_join auth check");
|
||||
let fetch_state = &state;
|
||||
let state_fetch = |k: StateEventType, s: StateKey| async move {
|
||||
let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?;
|
||||
|
||||
let event_id = fetch_state.get(&shortstatekey)?;
|
||||
services.rooms.timeline.get_pdu(event_id).await.ok()
|
||||
};
|
||||
|
||||
let auth_check = state_res::event_auth::auth_check(
|
||||
&state_res::RoomVersion::new(&room_version_id)?,
|
||||
&parsed_join_pdu,
|
||||
None, // TODO: third party invite
|
||||
|k, s| state_fetch(k.clone(), s.into()),
|
||||
&state_fetch(StateEventType::RoomCreate, "".into())
|
||||
.await
|
||||
.expect("create event is missing from send_join auth"),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
|
||||
|
||||
if !auth_check {
|
||||
return Err!(Request(Forbidden("Auth check failed")));
|
||||
}
|
||||
|
||||
info!("Compressing state from send_join");
|
||||
let compressed: CompressedState = services
|
||||
.rooms
|
||||
.state_compressor
|
||||
.compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow())))
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
debug!("Saving compressed state");
|
||||
let HashSetCompressStateEvent {
|
||||
shortstatehash: statehash_before_join,
|
||||
added,
|
||||
removed,
|
||||
} = services
|
||||
.rooms
|
||||
.state_compressor
|
||||
.save_state(room_id, Arc::new(compressed))
|
||||
.await?;
|
||||
|
||||
debug!("Forcing state for new room");
|
||||
services
|
||||
.rooms
|
||||
.state
|
||||
.force_state(room_id, statehash_before_join, added, removed, &state_lock)
|
||||
.await?;
|
||||
|
||||
debug!("Updating joined counts for new room");
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_joined_count(room_id)
|
||||
.await;
|
||||
|
||||
// We append to state before appending the pdu, so we don't have a moment in
|
||||
// time with the pdu without it's state. This is okay because append_pdu can't
|
||||
// fail.
|
||||
let statehash_after_join = services
|
||||
.rooms
|
||||
.state
|
||||
.append_to_state(&parsed_join_pdu, room_id)
|
||||
.await?;
|
||||
|
||||
info!("Appending new room join event");
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.append_pdu(
|
||||
&parsed_join_pdu,
|
||||
join_event,
|
||||
once(parsed_join_pdu.event_id.borrow()),
|
||||
&state_lock,
|
||||
room_id,
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("Setting final room state for new room");
|
||||
// We set the room state after inserting the pdu, so that we never have a moment
|
||||
// in time where events in the current room state do not exist
|
||||
services
|
||||
.rooms
|
||||
.state
|
||||
.set_room_state(room_id, statehash_after_join, &state_lock);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local", level = "info")]
|
||||
async fn join_room_by_id_helper_local(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
state_lock: RoomMutexGuard,
|
||||
) -> Result {
|
||||
info!("Joining room locally");
|
||||
|
||||
let (room_version, join_rules, is_invited) = join!(
|
||||
services.rooms.state.get_room_version(room_id),
|
||||
services.rooms.state_accessor.get_join_rules(room_id),
|
||||
services.rooms.state_cache.is_invited(sender_user, room_id)
|
||||
);
|
||||
|
||||
let room_version = room_version?;
|
||||
let mut auth_user: Option<OwnedUserId> = None;
|
||||
if !is_invited && matches!(join_rules, JoinRule::Restricted(_) | JoinRule::KnockRestricted(_))
|
||||
{
|
||||
use RoomVersionId::*;
|
||||
if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
|
||||
// This is a restricted room, check if we can complete the join requirements
|
||||
// locally.
|
||||
let needs_auth_user =
|
||||
user_can_perform_restricted_join(services, sender_user, room_id, &room_version)
|
||||
.await;
|
||||
if needs_auth_user.is_ok_and(is_true!()) {
|
||||
// If there was an error or the value is false, we'll try joining over
|
||||
// federation. Since it's Ok(true), we can authorise this locally.
|
||||
// If we can't select a local user, this will remain None, the join will fail,
|
||||
// and we'll fall back to federation.
|
||||
auth_user = select_authorising_user(services, room_id, sender_user, &state_lock)
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let content = RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason: reason.clone(),
|
||||
join_authorized_via_users_server: auth_user,
|
||||
..RoomMemberEventContent::new(MembershipState::Join)
|
||||
};
|
||||
|
||||
// Try normal join first
|
||||
let Err(error) = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(sender_user.to_string(), &content),
|
||||
sender_user,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await
|
||||
else {
|
||||
info!("Joined room locally");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if servers.is_empty() || servers.len() == 1 && services.globals.server_is_ours(&servers[0]) {
|
||||
if !services.rooms.metadata.exists(room_id).await {
|
||||
return Err!(Request(
|
||||
Unknown(
|
||||
"Room was not found locally and no servers were found to help us discover it"
|
||||
),
|
||||
NOT_FOUND
|
||||
));
|
||||
}
|
||||
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
info!(
|
||||
?error,
|
||||
remote_servers = %servers.len(),
|
||||
"Could not join room locally, attempting remote join",
|
||||
);
|
||||
join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, state_lock)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn make_join_request(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
servers: &[OwnedServerName],
|
||||
) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> {
|
||||
let mut make_join_counter: usize = 1;
|
||||
|
||||
for remote_server in servers {
|
||||
if services.globals.server_is_ours(remote_server) {
|
||||
continue;
|
||||
}
|
||||
info!(
|
||||
"Asking {remote_server} for make_join (attempt {make_join_counter}/{})",
|
||||
servers.len()
|
||||
);
|
||||
let make_join_response = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
remote_server,
|
||||
federation::membership::prepare_join_event::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
user_id: sender_user.to_owned(),
|
||||
ver: services.server.supported_room_versions().collect(),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
trace!("make_join response: {:?}", make_join_response);
|
||||
make_join_counter = make_join_counter.saturating_add(1);
|
||||
|
||||
match make_join_response {
|
||||
| Ok(response) => {
|
||||
info!("Received make_join response from {remote_server}");
|
||||
if let Err(e) = validate_remote_member_event_stub(
|
||||
&MembershipState::Join,
|
||||
sender_user,
|
||||
room_id,
|
||||
&to_canonical_object(&response.event)?,
|
||||
) {
|
||||
warn!("make_join response from {remote_server} failed validation: {e}");
|
||||
continue;
|
||||
}
|
||||
return Ok((response, remote_server.clone()));
|
||||
},
|
||||
| Err(e) => match e.kind() {
|
||||
| ErrorKind::UnableToAuthorizeJoin => {
|
||||
info!(
|
||||
"{remote_server} was unable to verify the joining user satisfied \
|
||||
restricted join requirements: {e}. Will continue trying."
|
||||
);
|
||||
},
|
||||
| ErrorKind::UnableToGrantJoin => {
|
||||
info!(
|
||||
"{remote_server} believes the joining user satisfies restricted join \
|
||||
rules, but is unable to authorise a join for us. Will continue trying."
|
||||
);
|
||||
},
|
||||
| ErrorKind::IncompatibleRoomVersion { room_version } => {
|
||||
warn!(
|
||||
"{remote_server} reports the room we are trying to join is \
|
||||
v{room_version}, which we do not support."
|
||||
);
|
||||
return Err(e);
|
||||
},
|
||||
| ErrorKind::Forbidden { .. } => {
|
||||
warn!("{remote_server} refuses to let us join: {e}.");
|
||||
return Err(e);
|
||||
},
|
||||
| ErrorKind::NotFound => {
|
||||
info!(
|
||||
"{remote_server} does not know about {room_id}: {e}. Will continue \
|
||||
trying."
|
||||
);
|
||||
},
|
||||
| _ => {
|
||||
info!("{remote_server} failed to make_join: {e}. Will continue trying.");
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
info!("All {} servers were unable to assist in joining {room_id} :(", servers.len());
|
||||
Err!(BadServerResponse("No server available to assist in joining."))
|
||||
Ok(join_room_by_id_or_alias::v3::Response::new(room_id))
|
||||
}
|
||||
|
||||
/// Moves deprioritized servers (if any) to the back of the list.
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
|
||||
use ruma::{
|
||||
api::client::membership::kick_user,
|
||||
assign,
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
};
|
||||
|
||||
@@ -18,41 +19,33 @@ pub(crate) async fn kick_user_route(
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
||||
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
|
||||
|
||||
let Ok(event) = services
|
||||
if !services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(&body.room_id, &body.user_id)
|
||||
.state_cache
|
||||
.user_membership(&body.user_id, &body.room_id)
|
||||
.await
|
||||
else {
|
||||
// copy synapse's behaviour of returning 200 without any change to the state
|
||||
// instead of erroring on left users
|
||||
return Ok(kick_user::v3::Response::new());
|
||||
};
|
||||
|
||||
if !matches!(
|
||||
event.membership,
|
||||
MembershipState::Invite | MembershipState::Knock | MembershipState::Join,
|
||||
) {
|
||||
return Err!(Request(Forbidden(
|
||||
"Cannot kick a user who is not apart of the room (current membership: {})",
|
||||
event.membership
|
||||
)));
|
||||
.is_some_and(|membership| {
|
||||
matches!(
|
||||
membership,
|
||||
MembershipState::Invite | MembershipState::Join | MembershipState::Knock
|
||||
)
|
||||
}) {
|
||||
return Err!(Request(Forbidden("You cannot kick users who are not in the room.")));
|
||||
}
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason: body.reason.clone(),
|
||||
is_direct: None,
|
||||
join_authorized_via_users_server: None,
|
||||
third_party_invite: None,
|
||||
..event
|
||||
}),
|
||||
PartialPdu::state(
|
||||
body.user_id.to_string(),
|
||||
&assign!(RoomMemberEventContent::new(MembershipState::Leave), {
|
||||
reason: body.reason.clone(),
|
||||
redact_events: body.redact_events,
|
||||
}),
|
||||
),
|
||||
sender_user,
|
||||
Some(&body.room_id),
|
||||
&state_lock,
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
Err, Result, debug, debug_info, debug_warn, err, info,
|
||||
matrix::{
|
||||
event::gen_event_id,
|
||||
pdu::{PduBuilder, PduEvent},
|
||||
pdu::{PartialPdu, PduEvent},
|
||||
},
|
||||
result::FlatOk,
|
||||
trace,
|
||||
@@ -15,8 +15,8 @@
|
||||
};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId,
|
||||
RoomVersionId, UserId,
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,
|
||||
OwnedUserId, RoomId, UserId,
|
||||
api::{
|
||||
client::knock::knock_room,
|
||||
federation::{self},
|
||||
@@ -33,12 +33,13 @@
|
||||
use service::{
|
||||
Services,
|
||||
rooms::{
|
||||
membership::validate_remote_member_event_stub,
|
||||
state::RoomMutexGuard,
|
||||
state_compressor::{CompressedState, HashSetCompressStateEvent},
|
||||
},
|
||||
};
|
||||
|
||||
use super::{banned_room_check, join::join_room_by_id_helper, validate_remote_member_event_stub};
|
||||
use super::banned_room_check;
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}`
|
||||
@@ -73,7 +74,6 @@ pub(crate) async fn knock_room_route(
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
@@ -113,11 +113,7 @@ pub(crate) async fn knock_room_route(
|
||||
)
|
||||
.await?;
|
||||
|
||||
let addl_via_servers = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)
|
||||
.map(ToOwned::to_owned);
|
||||
let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
|
||||
|
||||
let addl_state_servers = services
|
||||
.rooms
|
||||
@@ -130,7 +126,7 @@ pub(crate) async fn knock_room_route(
|
||||
.iter()
|
||||
.map(|event| event.get_field("sender"))
|
||||
.filter_map(FlatOk::flat_ok)
|
||||
.map(|user: &UserId| user.server_name().to_owned())
|
||||
.map(|user: OwnedUserId| user.server_name().to_owned())
|
||||
.stream()
|
||||
.chain(addl_via_servers)
|
||||
.collect()
|
||||
@@ -188,7 +184,7 @@ async fn knock_room_by_id_helper(
|
||||
.await
|
||||
{
|
||||
debug_warn!("{sender_user} is already knocked in {room_id}");
|
||||
return Ok(knock_room::v3::Response { room_id: room_id.into() });
|
||||
return Ok(knock_room::v3::Response::new(room_id.into()));
|
||||
}
|
||||
|
||||
if let Ok(membership) = services
|
||||
@@ -243,15 +239,11 @@ async fn knock_room_by_id_helper(
|
||||
// join_room_by_id_helper We need to release the lock here and let
|
||||
// join_room_by_id_helper acquire it again
|
||||
drop(state_lock);
|
||||
match join_room_by_id_helper(
|
||||
services,
|
||||
sender_user,
|
||||
room_id,
|
||||
reason.clone(),
|
||||
servers,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
match services
|
||||
.rooms
|
||||
.membership
|
||||
.join_room(sender_user, room_id, reason.clone(), servers)
|
||||
.await
|
||||
{
|
||||
| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
|
||||
| Err(e) => {
|
||||
@@ -339,34 +331,26 @@ async fn knock_room_helper_local(
|
||||
) -> Result {
|
||||
debug_info!("We can knock locally");
|
||||
|
||||
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
|
||||
let room_version = services.rooms.state.get_room_version(room_id).await?;
|
||||
let room_version_rules = room_version
|
||||
.rules()
|
||||
.expect("room version should have defined rules");
|
||||
|
||||
if matches!(
|
||||
room_version_id,
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
) {
|
||||
if !room_version_rules.authorization.knocking {
|
||||
return Err!(Request(Forbidden("This room does not support knocking.")));
|
||||
}
|
||||
|
||||
let content = RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason: reason.clone(),
|
||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
||||
};
|
||||
let mut content = RoomMemberEventContent::new(MembershipState::Knock);
|
||||
content.displayname = services.users.displayname(sender_user).await.ok();
|
||||
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
|
||||
content.reason.clone_from(&reason.clone());
|
||||
|
||||
// Try normal knock first
|
||||
let Err(error) = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(sender_user.to_string(), &content),
|
||||
PartialPdu::state(sender_user.to_string(), &content),
|
||||
sender_user,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
@@ -381,19 +365,18 @@ async fn knock_room_helper_local(
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock");
|
||||
|
||||
let (make_knock_response, remote_server) =
|
||||
make_knock_request(services, sender_user, room_id, servers).await?;
|
||||
|
||||
info!("make_knock finished");
|
||||
|
||||
let room_version_id = make_knock_response.room_version;
|
||||
let room_version = make_knock_response.room_version;
|
||||
let room_version_rules = room_version
|
||||
.rules()
|
||||
.expect("room version should have defined rules");
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
||||
));
|
||||
if !services.server.supported_room_version(&room_version) {
|
||||
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
|
||||
}
|
||||
|
||||
let mut knock_event_stub = serde_json::from_str::<CanonicalJsonObject>(
|
||||
@@ -424,24 +407,17 @@ async fn knock_room_helper_local(
|
||||
);
|
||||
knock_event_stub.insert(
|
||||
"content".to_owned(),
|
||||
to_canonical_value(RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason,
|
||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
to_canonical_value(content).expect("event is valid, we just created it"),
|
||||
);
|
||||
|
||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
||||
// to be present
|
||||
services
|
||||
.server_keys
|
||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
|
||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
|
||||
|
||||
// Generate event id
|
||||
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
|
||||
let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
|
||||
|
||||
// Add event_id
|
||||
knock_event_stub
|
||||
@@ -451,14 +427,14 @@ async fn knock_room_helper_local(
|
||||
let knock_event = knock_event_stub;
|
||||
|
||||
info!("Asking {remote_server} for send_knock in room {room_id}");
|
||||
let send_knock_request = federation::knock::send_knock::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: event_id.clone(),
|
||||
pdu: services
|
||||
let send_knock_request = federation::membership::create_knock_event::v1::Request::new(
|
||||
room_id.to_owned(),
|
||||
event_id.clone(),
|
||||
services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(knock_event.clone())
|
||||
.await,
|
||||
};
|
||||
);
|
||||
|
||||
services
|
||||
.sending
|
||||
@@ -520,12 +496,13 @@ async fn knock_room_helper_remote(
|
||||
|
||||
info!("make_knock finished");
|
||||
|
||||
let room_version_id = make_knock_response.room_version;
|
||||
let room_version = make_knock_response.room_version;
|
||||
let room_version_rules = room_version
|
||||
.rules()
|
||||
.expect("room version should have defined rules");
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
||||
));
|
||||
if !services.server.supported_room_version(&room_version) {
|
||||
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
|
||||
}
|
||||
|
||||
let mut knock_event_stub: CanonicalJsonObject =
|
||||
@@ -545,26 +522,25 @@ async fn knock_room_helper_remote(
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
),
|
||||
);
|
||||
|
||||
let mut knock_content = RoomMemberEventContent::new(MembershipState::Knock);
|
||||
knock_content.displayname = services.users.displayname(sender_user).await.ok();
|
||||
knock_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
|
||||
knock_content.reason = reason;
|
||||
|
||||
knock_event_stub.insert(
|
||||
"content".to_owned(),
|
||||
to_canonical_value(RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason,
|
||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
to_canonical_value(knock_content).expect("event is valid, we just created it"),
|
||||
);
|
||||
|
||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
||||
// to be present
|
||||
services
|
||||
.server_keys
|
||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
|
||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
|
||||
|
||||
// Generate event id
|
||||
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
|
||||
let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
|
||||
|
||||
// Add event_id
|
||||
knock_event_stub
|
||||
@@ -574,18 +550,18 @@ async fn knock_room_helper_remote(
|
||||
let knock_event = knock_event_stub;
|
||||
|
||||
info!("Asking {remote_server} for send_knock in room {room_id}");
|
||||
let send_knock_request = federation::knock::send_knock::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: event_id.clone(),
|
||||
pdu: services
|
||||
let request = federation::membership::create_knock_event::v1::Request::new(
|
||||
room_id.to_owned(),
|
||||
event_id.clone(),
|
||||
services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(knock_event.clone())
|
||||
.await,
|
||||
};
|
||||
);
|
||||
|
||||
let send_knock_response = services
|
||||
.sending
|
||||
.send_federation_request(&remote_server, send_knock_request)
|
||||
.send_federation_request(&remote_server, request)
|
||||
.await?;
|
||||
|
||||
info!("send_knock finished");
|
||||
@@ -604,7 +580,17 @@ async fn knock_room_helper_remote(
|
||||
let state = send_knock_response
|
||||
.knock_room_state
|
||||
.iter()
|
||||
.map(|event| serde_json::from_str::<CanonicalJsonObject>(event.clone().into_json().get()))
|
||||
.map(|event| {
|
||||
#[allow(deprecated)]
|
||||
let raw_value = match event {
|
||||
| federation::membership::RawStrippedState::Stripped(raw_state) =>
|
||||
&raw_state.clone().into_json(),
|
||||
| federation::membership::RawStrippedState::Pdu(raw_value) => raw_value,
|
||||
| _ => panic!("unknown raw stripped state type"),
|
||||
};
|
||||
|
||||
serde_json::from_str::<CanonicalJsonObject>(raw_value.get())
|
||||
})
|
||||
.filter_map(Result::ok);
|
||||
|
||||
let mut state_map: HashMap<u64, OwnedEventId> = HashMap::new();
|
||||
@@ -629,7 +615,7 @@ async fn knock_room_helper_remote(
|
||||
continue;
|
||||
};
|
||||
|
||||
let event_id = gen_event_id(&event, &room_version_id)?;
|
||||
let event_id = gen_event_id(&event, &room_version_rules)?;
|
||||
let shortstatekey = services
|
||||
.rooms
|
||||
.short
|
||||
@@ -709,7 +695,7 @@ async fn make_knock_request(
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
servers: &[OwnedServerName],
|
||||
) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> {
|
||||
) -> Result<(federation::membership::prepare_knock_event::v1::Response, OwnedServerName)> {
|
||||
let mut make_knock_response_and_server =
|
||||
Err!(BadServerResponse("No server available to assist in knocking."));
|
||||
|
||||
@@ -722,16 +708,15 @@ async fn make_knock_request(
|
||||
|
||||
info!("Asking {remote_server} for make_knock ({make_knock_counter})");
|
||||
|
||||
let mut request = federation::membership::prepare_knock_event::v1::Request::new(
|
||||
room_id.to_owned(),
|
||||
sender_user.to_owned(),
|
||||
);
|
||||
request.ver = services.server.supported_room_versions().collect();
|
||||
|
||||
let make_knock_response = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
remote_server,
|
||||
federation::knock::create_knock_event_template::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
user_id: sender_user.to_owned(),
|
||||
ver: services.server.supported_room_versions().collect(),
|
||||
},
|
||||
)
|
||||
.send_federation_request(remote_server, request)
|
||||
.await;
|
||||
|
||||
trace!("make_knock response: {make_knock_response:?}");
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Pdu, Result, debug_info, debug_warn, err,
|
||||
matrix::{event::gen_event_id, pdu::PduBuilder},
|
||||
matrix::{event::gen_event_id, pdu::PartialPdu},
|
||||
utils::{self, FutureBoolExt, future::ReadyEqExt},
|
||||
warn,
|
||||
};
|
||||
use futures::{FutureExt, StreamExt, pin_mut};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId,
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, UserId,
|
||||
api::{
|
||||
client::membership::leave_room,
|
||||
federation::{self},
|
||||
@@ -19,9 +19,8 @@
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
};
|
||||
use service::Services;
|
||||
use service::{Services, rooms::membership::validate_remote_member_event_stub};
|
||||
|
||||
use super::validate_remote_member_event_stub;
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `POST /_matrix/client/v3/rooms/{roomId}/leave`
|
||||
@@ -42,11 +41,7 @@ pub(crate) async fn leave_room_route(
|
||||
// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms,
|
||||
// and ignores errors
|
||||
pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
|
||||
let rooms_joined = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(user_id)
|
||||
.map(ToOwned::to_owned);
|
||||
let rooms_joined = services.rooms.state_cache.rooms_joined(user_id);
|
||||
|
||||
let rooms_invited = services
|
||||
.rooms
|
||||
@@ -142,18 +137,17 @@ pub async fn leave_room(
|
||||
.await;
|
||||
|
||||
match user_member_event_content {
|
||||
| Ok(content) => {
|
||||
| Ok(mut content) => {
|
||||
content.membership = MembershipState::Leave;
|
||||
content.reason = reason;
|
||||
content.join_authorized_via_users_server = None;
|
||||
content.is_direct = None;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
is_direct: None,
|
||||
..content
|
||||
}),
|
||||
PartialPdu::state(user_id.to_string(), &content),
|
||||
user_id,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
@@ -226,7 +220,6 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<HashSet<OwnedServerName>>()
|
||||
.await,
|
||||
);
|
||||
@@ -260,7 +253,7 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
.filter_map(|event| event.get_field("sender").ok().flatten())
|
||||
.filter_map(|sender: &str| UserId::parse(sender).ok())
|
||||
.filter_map(|sender| {
|
||||
if !services.globals.user_is_local(sender) {
|
||||
if !services.globals.user_is_local(&sender) {
|
||||
Some(sender.server_name().to_owned())
|
||||
} else {
|
||||
None
|
||||
@@ -289,10 +282,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
.sending
|
||||
.send_federation_request(
|
||||
remote_server.as_ref(),
|
||||
federation::membership::prepare_leave_event::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
user_id: user_id.to_owned(),
|
||||
},
|
||||
federation::membership::prepare_leave_event::v1::Request::new(
|
||||
room_id.to_owned(),
|
||||
user_id.to_owned(),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -329,6 +322,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
)));
|
||||
}
|
||||
|
||||
let room_version_rules = room_version_id
|
||||
.rules()
|
||||
.expect("room version should have defined rules");
|
||||
|
||||
let mut leave_event_stub = serde_json::from_str::<CanonicalJsonObject>(
|
||||
make_leave_response.event.get(),
|
||||
)
|
||||
@@ -366,21 +363,16 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
}
|
||||
|
||||
// room v3 and above removed the "event_id" field from remote PDU format
|
||||
match room_version_id {
|
||||
| RoomVersionId::V1 | RoomVersionId::V2 => {},
|
||||
| _ => {
|
||||
leave_event_stub.remove("event_id");
|
||||
},
|
||||
}
|
||||
leave_event_stub.remove("event_id");
|
||||
|
||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
||||
// to be present
|
||||
services
|
||||
.server_keys
|
||||
.hash_and_sign_event(&mut leave_event_stub, &room_version_id)?;
|
||||
.hash_and_sign_event(&mut leave_event_stub, &room_version_rules)?;
|
||||
|
||||
// Generate event id
|
||||
let event_id = gen_event_id(&leave_event_stub, &room_version_id)?;
|
||||
let event_id = gen_event_id(&leave_event_stub, &room_version_rules)?;
|
||||
|
||||
// Add event_id back
|
||||
leave_event_stub
|
||||
@@ -393,14 +385,14 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
.sending
|
||||
.send_federation_request(
|
||||
&remote_server,
|
||||
federation::membership::create_leave_event::v2::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: event_id.clone(),
|
||||
pdu: services
|
||||
federation::membership::create_leave_event::v2::Request::new(
|
||||
room_id.to_owned(),
|
||||
event_id.clone(),
|
||||
services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(leave_event.clone())
|
||||
.await,
|
||||
},
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
use futures::{FutureExt, StreamExt, future::join};
|
||||
use ruma::{
|
||||
api::client::membership::{
|
||||
get_member_events::{self, v3::MembershipEventFilter},
|
||||
get_member_events::{self},
|
||||
joined_members::{self, v3::RoomMember},
|
||||
},
|
||||
events::{
|
||||
@@ -43,20 +43,20 @@ pub(crate) async fn get_member_events_route(
|
||||
return Err!(Request(Forbidden("You don't have permission to view this room.")));
|
||||
}
|
||||
|
||||
Ok(get_member_events::v3::Response {
|
||||
chunk: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_full(&body.room_id)
|
||||
.ready_filter_map(Result::ok)
|
||||
.ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember)
|
||||
.map(at!(1))
|
||||
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership))
|
||||
.map(Event::into_format)
|
||||
.collect()
|
||||
.boxed()
|
||||
.await,
|
||||
})
|
||||
let chunk = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_full(&body.room_id)
|
||||
.ready_filter_map(Result::ok)
|
||||
.ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember)
|
||||
.map(at!(1))
|
||||
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership))
|
||||
.map(Event::into_format)
|
||||
.collect()
|
||||
.boxed()
|
||||
.await;
|
||||
|
||||
Ok(get_member_events::v3::Response::new(chunk))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members`
|
||||
@@ -78,70 +78,46 @@ pub(crate) async fn joined_members_route(
|
||||
return Err!(Request(Forbidden("You don't have permission to view this room.")));
|
||||
}
|
||||
|
||||
Ok(joined_members::v3::Response {
|
||||
joined: services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&body.room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.broad_then(|user_id| async move {
|
||||
let (display_name, avatar_url) = join(
|
||||
services.users.displayname(&user_id).ok(),
|
||||
services.users.avatar_url(&user_id).ok(),
|
||||
)
|
||||
.await;
|
||||
let joined = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&body.room_id)
|
||||
.broad_then(|user_id| async move {
|
||||
let mut member = RoomMember::new();
|
||||
let (display_name, avatar_url) = join(
|
||||
services.users.displayname(&user_id).ok(),
|
||||
services.users.avatar_url(&user_id).ok(),
|
||||
)
|
||||
.await;
|
||||
member.display_name = display_name;
|
||||
member.avatar_url = avatar_url;
|
||||
|
||||
(user_id, RoomMember { display_name, avatar_url })
|
||||
})
|
||||
.collect()
|
||||
.await,
|
||||
})
|
||||
(user_id, member)
|
||||
})
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(joined_members::v3::Response::new(joined))
|
||||
}
|
||||
|
||||
fn membership_filter<Pdu: Event>(
|
||||
pdu: Pdu,
|
||||
for_membership: Option<&MembershipEventFilter>,
|
||||
not_membership: Option<&MembershipEventFilter>,
|
||||
membership_state_filter: Option<&MembershipState>,
|
||||
not_membership_state_filter: Option<&MembershipState>,
|
||||
) -> Option<impl Event> {
|
||||
let membership_state_filter = match for_membership {
|
||||
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
|
||||
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
|
||||
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
|
||||
| Some(MembershipEventFilter::Leave) => MembershipState::Leave,
|
||||
| Some(_) | None => MembershipState::Join,
|
||||
};
|
||||
|
||||
let not_membership_state_filter = match not_membership {
|
||||
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
|
||||
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
|
||||
| Some(MembershipEventFilter::Join) => MembershipState::Join,
|
||||
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
|
||||
| Some(_) | None => MembershipState::Leave,
|
||||
};
|
||||
|
||||
let evt_membership = pdu.get_content::<RoomMemberEventContent>().ok()?.membership;
|
||||
|
||||
if for_membership.is_some() && not_membership.is_some() {
|
||||
if membership_state_filter != evt_membership
|
||||
|| not_membership_state_filter == evt_membership
|
||||
{
|
||||
None
|
||||
} else {
|
||||
Some(pdu)
|
||||
}
|
||||
} else if for_membership.is_some() && not_membership.is_none() {
|
||||
if membership_state_filter != evt_membership {
|
||||
None
|
||||
} else {
|
||||
Some(pdu)
|
||||
}
|
||||
} else if not_membership.is_some() && for_membership.is_none() {
|
||||
if not_membership_state_filter == evt_membership {
|
||||
None
|
||||
} else {
|
||||
Some(pdu)
|
||||
}
|
||||
} else {
|
||||
Some(pdu)
|
||||
if let Some(membership_state_filter) = membership_state_filter
|
||||
&& *membership_state_filter != evt_membership
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
if let Some(not_membership_state_filter) = not_membership_state_filter
|
||||
&& *not_membership_state_filter == evt_membership
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(pdu)
|
||||
}
|
||||
|
||||
@@ -13,16 +13,10 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, warn};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, OwnedRoomId, RoomId, ServerName, UserId,
|
||||
api::client::membership::joined_rooms,
|
||||
events::{
|
||||
StaticEventContent,
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
};
|
||||
use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms};
|
||||
use service::Services;
|
||||
|
||||
pub use self::leave::{leave_all_rooms, leave_room, remote_leave_room};
|
||||
pub(crate) use self::{
|
||||
ban::ban_user_route,
|
||||
forget::forget_room_route,
|
||||
@@ -34,10 +28,6 @@
|
||||
members::{get_member_events_route, joined_members_route},
|
||||
unban::unban_user_route,
|
||||
};
|
||||
pub use self::{
|
||||
join::join_room_by_id_helper,
|
||||
leave::{leave_all_rooms, leave_room, remote_leave_room},
|
||||
};
|
||||
use crate::{Ruma, client::full_user_deactivate};
|
||||
|
||||
/// # `POST /_matrix/client/r0/joined_rooms`
|
||||
@@ -47,15 +37,14 @@ pub(crate) async fn joined_rooms_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<joined_rooms::v3::Request>,
|
||||
) -> Result<joined_rooms::v3::Response> {
|
||||
Ok(joined_rooms::v3::Response {
|
||||
joined_rooms: services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(body.sender_user())
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await,
|
||||
})
|
||||
let joined_rooms = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(body.sender_user())
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(joined_rooms::v3::Response::new(joined_rooms))
|
||||
}
|
||||
|
||||
/// Checks if the room is banned in any way possible and the sender user is not
|
||||
@@ -160,80 +149,3 @@ pub(crate) async fn banned_room_check(
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates that an event returned from a remote server by `/make_*`
|
||||
/// actually is a membership event with the expected fields.
|
||||
///
|
||||
/// Without checking this, the remote server could use the remote membership
|
||||
/// mechanism to trick our server into signing arbitrary malicious events.
|
||||
pub(crate) fn validate_remote_member_event_stub(
|
||||
membership: &MembershipState,
|
||||
user_id: &UserId,
|
||||
room_id: &RoomId,
|
||||
event_stub: &CanonicalJsonObject,
|
||||
) -> Result<()> {
|
||||
let Some(event_type) = event_stub.get("type") else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing type field"
|
||||
));
|
||||
};
|
||||
if event_type != &RoomMemberEventContent::TYPE {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with invalid event type"
|
||||
));
|
||||
}
|
||||
|
||||
let Some(sender) = event_stub.get("sender") else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing sender field"
|
||||
));
|
||||
};
|
||||
if sender != &user_id.as_str() {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with incorrect sender"
|
||||
));
|
||||
}
|
||||
|
||||
let Some(state_key) = event_stub.get("state_key") else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing state_key field"
|
||||
));
|
||||
};
|
||||
if state_key != &user_id.as_str() {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with incorrect state_key"
|
||||
));
|
||||
}
|
||||
|
||||
let Some(event_room_id) = event_stub.get("room_id") else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing room_id field"
|
||||
));
|
||||
};
|
||||
if event_room_id != &room_id.as_str() {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with incorrect room_id"
|
||||
));
|
||||
}
|
||||
|
||||
let Some(content) = event_stub
|
||||
.get("content")
|
||||
.and_then(|content| content.as_object())
|
||||
else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing content field"
|
||||
));
|
||||
};
|
||||
let Some(event_membership) = content.get("membership") else {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with missing membership field"
|
||||
));
|
||||
};
|
||||
if event_membership != &membership.as_str() {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote server returned member event with incorrect membership type"
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
|
||||
use ruma::{
|
||||
api::client::membership::unban_user,
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
@@ -18,9 +18,9 @@ pub(crate) async fn unban_user_route(
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
||||
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
|
||||
|
||||
let current_member_content = services
|
||||
let mut current_member_content = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(&body.room_id, &body.user_id)
|
||||
@@ -34,18 +34,17 @@ pub(crate) async fn unban_user_route(
|
||||
)));
|
||||
}
|
||||
|
||||
current_member_content.membership = MembershipState::Leave;
|
||||
current_member_content.reason.clone_from(&body.reason);
|
||||
current_member_content.join_authorized_via_users_server = None;
|
||||
current_member_content.third_party_invite = None;
|
||||
current_member_content.is_direct = None;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
third_party_invite: None,
|
||||
is_direct: None,
|
||||
..current_member_content
|
||||
}),
|
||||
PartialPdu::state(body.user_id.to_string(), ¤t_member_content),
|
||||
sender_user,
|
||||
Some(&body.room_id),
|
||||
&state_lock,
|
||||
|
||||
+13
-10
@@ -26,15 +26,17 @@
|
||||
DeviceId, RoomId, UserId,
|
||||
api::{
|
||||
Direction,
|
||||
client::{error::ErrorKind, filter::RoomEventFilter, message::get_message_events},
|
||||
client::{filter::RoomEventFilter, message::get_message_events},
|
||||
error::{ErrorKind, SenderIgnoredErrorData},
|
||||
},
|
||||
assign,
|
||||
events::{
|
||||
AnyStateEvent, StateEventType,
|
||||
TimelineEventType::{self, *},
|
||||
invite_permission_config::FilterLevel,
|
||||
},
|
||||
serde::Raw,
|
||||
};
|
||||
use ruminuwuity::invite_permission_config::FilterLevel;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::Ruma;
|
||||
@@ -74,7 +76,6 @@ pub(crate) async fn get_message_events_route(
|
||||
ClientIp(client_ip): ClientIp,
|
||||
body: Ruma<get_message_events::v3::Request>,
|
||||
) -> Result<get_message_events::v3::Response> {
|
||||
debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
|
||||
let sender_user = body.sender_user();
|
||||
let sender_device = body.sender_device.as_deref();
|
||||
let room_id = &body.room_id;
|
||||
@@ -199,12 +200,12 @@ pub(crate) async fn get_message_events_route(
|
||||
.map(Event::into_format)
|
||||
.collect();
|
||||
|
||||
Ok(get_message_events::v3::Response {
|
||||
Ok(assign!(get_message_events::v3::Response::new(), {
|
||||
start: from.to_string(),
|
||||
end: next_token.as_ref().map(PduCount::to_string),
|
||||
chunk,
|
||||
state,
|
||||
})
|
||||
chunk: chunk,
|
||||
state: state,
|
||||
}))
|
||||
}
|
||||
|
||||
pub(crate) async fn lazy_loading_witness<'a, I>(
|
||||
@@ -301,7 +302,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
|
||||
{
|
||||
// exclude Synapse's dummy events from bloating up response bodies. clients
|
||||
// don't need to see this.
|
||||
if event.kind().to_cow_str() == "org.matrix.dummy_event" {
|
||||
if event.kind().to_string() == "org.matrix.dummy_event" {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -323,7 +324,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
|
||||
if server_ignored {
|
||||
// the sender's server is ignored, so ignore this event
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::SenderIgnored { sender: None },
|
||||
ErrorKind::SenderIgnored(SenderIgnoredErrorData::new()),
|
||||
"The sender's server is ignored by this server.",
|
||||
));
|
||||
}
|
||||
@@ -332,7 +333,9 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
|
||||
// the recipient of this PDU has the sender ignored, and we're not
|
||||
// configured to send ignored messages to clients
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::SenderIgnored { sender: Some(event.sender().to_owned()) },
|
||||
ErrorKind::SenderIgnored(SenderIgnoredErrorData::with_sender(
|
||||
event.sender().to_owned(),
|
||||
)),
|
||||
"You have ignored this sender.",
|
||||
));
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
pub(super) mod media_legacy;
|
||||
pub(super) mod membership;
|
||||
pub(super) mod message;
|
||||
pub(super) mod mutual_rooms;
|
||||
pub(super) mod openid;
|
||||
pub(super) mod presence;
|
||||
pub(super) mod profile;
|
||||
@@ -35,7 +36,6 @@
|
||||
pub(super) mod threads;
|
||||
pub(super) mod to_device;
|
||||
pub(super) mod typing;
|
||||
pub(super) mod unstable;
|
||||
pub(super) mod unversioned;
|
||||
pub(super) mod user_directory;
|
||||
pub(super) mod voip;
|
||||
@@ -58,12 +58,12 @@
|
||||
pub(super) use media::*;
|
||||
pub(super) use media_legacy::*;
|
||||
pub(super) use membership::*;
|
||||
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
|
||||
pub use membership::{leave_all_rooms, leave_room, remote_leave_room};
|
||||
pub(super) use message::*;
|
||||
pub(super) use mutual_rooms::*;
|
||||
pub(super) use openid::*;
|
||||
pub(super) use presence::*;
|
||||
pub(super) use profile::*;
|
||||
pub use profile::{update_all_rooms, update_avatar_url, update_displayname};
|
||||
pub use push::recreate_push_rules_and_return;
|
||||
pub(super) use push::*;
|
||||
pub(super) use read_marker::*;
|
||||
@@ -82,7 +82,6 @@
|
||||
pub(super) use threads::*;
|
||||
pub(super) use to_device::*;
|
||||
pub(super) use typing::*;
|
||||
pub(super) use unstable::*;
|
||||
pub(super) use unversioned::*;
|
||||
pub(super) use user_directory::*;
|
||||
pub(super) use voip::*;
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result};
|
||||
use futures::StreamExt;
|
||||
use ruma::api::client::membership::mutual_rooms;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`
|
||||
///
|
||||
/// Gets all the rooms the sender shares with the specified user.
|
||||
///
|
||||
/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
|
||||
#[tracing::instrument(skip_all, name = "mutual_rooms", level = "info")]
|
||||
pub(crate) async fn get_mutual_rooms_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<mutual_rooms::unstable::Request>,
|
||||
) -> Result<mutual_rooms::unstable::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
if sender_user == body.user_id {
|
||||
return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
|
||||
}
|
||||
|
||||
let mutual_rooms = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.get_shared_rooms(sender_user, &body.user_id)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(mutual_rooms::unstable::Response::new(mutual_rooms))
|
||||
}
|
||||
@@ -29,10 +29,10 @@ pub(crate) async fn create_openid_token_route(
|
||||
.users
|
||||
.create_openid_token(&body.user_id, &access_token)?;
|
||||
|
||||
Ok(account::request_openid_token::v3::Response {
|
||||
Ok(account::request_openid_token::v3::Response::new(
|
||||
access_token,
|
||||
token_type: TokenType::Bearer,
|
||||
matrix_server_name: services.server.name.clone(),
|
||||
expires_in: Duration::from_secs(expires_in),
|
||||
})
|
||||
TokenType::Bearer,
|
||||
services.server.name.clone(),
|
||||
Duration::from_secs(expires_in),
|
||||
))
|
||||
}
|
||||
|
||||
@@ -2,7 +2,10 @@
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result};
|
||||
use ruma::api::client::presence::{get_presence, set_presence};
|
||||
use ruma::{
|
||||
api::client::presence::{get_presence, set_presence},
|
||||
assign,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
@@ -26,7 +29,7 @@ pub(crate) async fn set_presence_route(
|
||||
.set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone())
|
||||
.await?;
|
||||
|
||||
Ok(set_presence::v3::Response {})
|
||||
Ok(set_presence::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/presence/{userId}/status`
|
||||
@@ -76,13 +79,11 @@ pub(crate) async fn get_presence_route(
|
||||
.map(|millis| Duration::from_millis(millis.into())),
|
||||
};
|
||||
|
||||
Ok(get_presence::v3::Response {
|
||||
// TODO: Should ruma just use the presenceeventcontent type here?
|
||||
Ok(assign!(get_presence::v3::Response::new(presence.content.presence), {
|
||||
status_msg,
|
||||
currently_active: presence.content.currently_active,
|
||||
last_active_ago,
|
||||
presence: presence.content.presence,
|
||||
})
|
||||
}))
|
||||
},
|
||||
| _ => Err!(Request(NotFound("Presence state for this user was not found"))),
|
||||
}
|
||||
|
||||
+326
-381
@@ -1,233 +1,29 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Result,
|
||||
matrix::pdu::PduBuilder,
|
||||
utils::{IterStream, future::TryExtExt, stream::TryIgnore},
|
||||
warn,
|
||||
};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PartialPdu, utils::to_canonical_object};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{
|
||||
FutureExt, StreamExt, TryStreamExt,
|
||||
future::{join, join3, join4},
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
OwnedMxcUri, OwnedRoomId, UserId,
|
||||
UserId,
|
||||
api::{
|
||||
client::profile::{
|
||||
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
||||
delete_profile_field, get_profile, get_profile_field, set_profile_field,
|
||||
},
|
||||
federation,
|
||||
},
|
||||
assign,
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
presence::PresenceState,
|
||||
profile::{ProfileFieldName, ProfileFieldValue},
|
||||
};
|
||||
use serde_json::{Value, to_value};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
|
||||
///
|
||||
/// Updates the displayname.
|
||||
///
|
||||
/// - Also makes sure other users receive the update using presence EDUs
|
||||
pub(crate) async fn set_displayname_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<set_display_name::v3::Request>,
|
||||
) -> Result<set_display_name::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
if *sender_user != body.user_id && body.appservice_info.is_none() {
|
||||
return Err!(Request(Forbidden("You cannot update the profile of another user")));
|
||||
}
|
||||
|
||||
let all_joined_rooms: Vec<OwnedRoomId> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(&body.user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms)
|
||||
.boxed()
|
||||
.await;
|
||||
|
||||
if services.config.allow_local_presence {
|
||||
// Presence update
|
||||
services
|
||||
.presence
|
||||
.ping_presence(&body.user_id, &PresenceState::Online)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(set_display_name::v3::Response {})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v3/profile/{userId}/displayname`
|
||||
///
|
||||
/// Returns the displayname of the user.
|
||||
///
|
||||
/// - If user is on another server and we do not have a local copy already fetch
|
||||
/// displayname over federation
|
||||
pub(crate) async fn get_displayname_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<get_display_name::v3::Request>,
|
||||
) -> Result<get_display_name::v3::Response> {
|
||||
if !services.globals.user_is_local(&body.user_id) {
|
||||
// Create and update our local copy of the user
|
||||
if let Ok(response) = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
body.user_id.server_name(),
|
||||
federation::query::get_profile_information::v1::Request {
|
||||
user_id: body.user_id.clone(),
|
||||
field: None, // we want the full user's profile to update locally too
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
services.users.create(&body.user_id, None, None).await?;
|
||||
}
|
||||
|
||||
services
|
||||
.users
|
||||
.set_displayname(&body.user_id, response.displayname.clone());
|
||||
services
|
||||
.users
|
||||
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
||||
services
|
||||
.users
|
||||
.set_blurhash(&body.user_id, response.blurhash.clone());
|
||||
|
||||
return Ok(get_display_name::v3::Response { displayname: response.displayname });
|
||||
}
|
||||
}
|
||||
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||
// federation
|
||||
return Err!(Request(NotFound("Profile was not found.")));
|
||||
}
|
||||
|
||||
Ok(get_display_name::v3::Response {
|
||||
displayname: services.users.displayname(&body.user_id).await.ok(),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/v3/profile/{userId}/avatar_url`
|
||||
///
|
||||
/// Updates the `avatar_url` and `blurhash`.
|
||||
///
|
||||
/// - Also makes sure other users receive the update using presence EDUs
|
||||
pub(crate) async fn set_avatar_url_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<set_avatar_url::v3::Request>,
|
||||
) -> Result<set_avatar_url::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
if *sender_user != body.user_id && body.appservice_info.is_none() {
|
||||
return Err!(Request(Forbidden("You cannot update the profile of another user")));
|
||||
}
|
||||
|
||||
let all_joined_rooms: Vec<OwnedRoomId> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(&body.user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
update_avatar_url(
|
||||
&services,
|
||||
&body.user_id,
|
||||
body.avatar_url.clone(),
|
||||
body.blurhash.clone(),
|
||||
&all_joined_rooms,
|
||||
)
|
||||
.boxed()
|
||||
.await;
|
||||
|
||||
if services.config.allow_local_presence {
|
||||
// Presence update
|
||||
services
|
||||
.presence
|
||||
.ping_presence(&body.user_id, &PresenceState::Online)
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
|
||||
Ok(set_avatar_url::v3::Response {})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url`
|
||||
///
|
||||
/// Returns the `avatar_url` and `blurhash` of the user.
|
||||
///
|
||||
/// - If user is on another server and we do not have a local copy already fetch
|
||||
/// `avatar_url` and blurhash over federation
|
||||
pub(crate) async fn get_avatar_url_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<get_avatar_url::v3::Request>,
|
||||
) -> Result<get_avatar_url::v3::Response> {
|
||||
if !services.globals.user_is_local(&body.user_id) {
|
||||
// Create and update our local copy of the user
|
||||
if let Ok(response) = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
body.user_id.server_name(),
|
||||
federation::query::get_profile_information::v1::Request {
|
||||
user_id: body.user_id.clone(),
|
||||
field: None, // we want the full user's profile to update locally as well
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
services.users.create(&body.user_id, None, None).await?;
|
||||
}
|
||||
|
||||
services
|
||||
.users
|
||||
.set_displayname(&body.user_id, response.displayname.clone());
|
||||
services
|
||||
.users
|
||||
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
||||
services
|
||||
.users
|
||||
.set_blurhash(&body.user_id, response.blurhash.clone());
|
||||
|
||||
return Ok(get_avatar_url::v3::Response {
|
||||
avatar_url: response.avatar_url,
|
||||
blurhash: response.blurhash,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||
// federation
|
||||
return Err!(Request(NotFound("Profile was not found.")));
|
||||
}
|
||||
|
||||
let (avatar_url, blurhash) = join(
|
||||
services.users.avatar_url(&body.user_id).ok(),
|
||||
services.users.blurhash(&body.user_id).ok(),
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(get_avatar_url::v3::Response { avatar_url, blurhash })
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v3/profile/{userId}`
|
||||
///
|
||||
/// Returns the displayname, avatar_url, blurhash, and custom profile fields of
|
||||
/// the user.
|
||||
/// Returns the user's profile information.
|
||||
///
|
||||
/// - If user is on another server and we do not have a local copy already,
|
||||
/// fetch profile over federation.
|
||||
@@ -235,188 +31,337 @@ pub(crate) async fn get_profile_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<get_profile::v3::Request>,
|
||||
) -> Result<get_profile::v3::Response> {
|
||||
let Some(profile) = fetch_full_profile(&services, &body.user_id).await else {
|
||||
return Err!(Request(NotFound("This user's profile could not be fetched.")));
|
||||
};
|
||||
|
||||
Ok(get_profile::v3::Response::from_iter(profile))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_profile_field_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<get_profile_field::v3::Request>,
|
||||
) -> Result<get_profile_field::v3::Response> {
|
||||
let value = fetch_profile_field(&services, &body.user_id, body.field.clone()).await?;
|
||||
|
||||
Ok(assign!(get_profile_field::v3::Response::default(), { value }))
|
||||
}
|
||||
|
||||
pub(crate) async fn set_profile_field_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<set_profile_field::v3::Request>,
|
||||
) -> Result<set_profile_field::v3::Response> {
|
||||
if body.user_id != body.sender_user()
|
||||
&& !(body.appservice_info.is_some()
|
||||
|| services.admin.user_is_admin(body.sender_user()).await)
|
||||
{
|
||||
return Err!(Request(Forbidden("You may not change other users' profile data.")));
|
||||
}
|
||||
|
||||
if !services.globals.user_is_local(&body.user_id) {
|
||||
// Create and update our local copy of the user
|
||||
if let Ok(response) = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
body.user_id.server_name(),
|
||||
federation::query::get_profile_information::v1::Request {
|
||||
user_id: body.user_id.clone(),
|
||||
field: None,
|
||||
},
|
||||
)
|
||||
return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
|
||||
}
|
||||
|
||||
set_profile_field(&services, &body.user_id, ProfileFieldChange::Set(body.value.clone()))
|
||||
.await?;
|
||||
|
||||
Ok(set_profile_field::v3::Response::new())
|
||||
}
|
||||
|
||||
pub(crate) async fn delete_profile_field_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<delete_profile_field::v3::Request>,
|
||||
) -> Result<delete_profile_field::v3::Response> {
|
||||
if body.user_id != body.sender_user()
|
||||
&& !(body.appservice_info.is_some()
|
||||
|| services.admin.user_is_admin(body.sender_user()).await)
|
||||
{
|
||||
return Err!(Request(Forbidden("You may not change other users' profile data.")));
|
||||
}
|
||||
|
||||
if !services.globals.user_is_local(&body.user_id) {
|
||||
return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
|
||||
}
|
||||
|
||||
set_profile_field(&services, &body.user_id, ProfileFieldChange::Delete(body.field.clone()))
|
||||
.await?;
|
||||
|
||||
Ok(delete_profile_field::v3::Response::new())
|
||||
}
|
||||
|
||||
async fn fetch_full_profile(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
) -> Option<BTreeMap<String, Value>> {
|
||||
// If the user exists locally, fetch their local profile
|
||||
if services.users.exists(user_id).await {
|
||||
return Some(get_local_profile(services, user_id).await);
|
||||
}
|
||||
|
||||
// Otherwise ask their homeserver
|
||||
let Ok(response) = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
user_id.server_name(),
|
||||
federation::query::get_profile_information::v1::Request::new(user_id.to_owned()),
|
||||
)
|
||||
.await
|
||||
else {
|
||||
return None;
|
||||
};
|
||||
|
||||
// Update our local copies of their profile fields
|
||||
services.users.clear_profile(user_id).await;
|
||||
|
||||
for (field, value) in response.iter() {
|
||||
let Ok(value) = ProfileFieldValue::new(field, value.to_owned()) else {
|
||||
// Skip malformed fields
|
||||
continue;
|
||||
};
|
||||
|
||||
let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value)).await;
|
||||
}
|
||||
|
||||
Some(BTreeMap::from_iter(response))
|
||||
}
|
||||
|
||||
async fn fetch_profile_field(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
field: ProfileFieldName,
|
||||
) -> Result<Option<ProfileFieldValue>> {
|
||||
// If the user exists locally, fetch their local profile field
|
||||
if services.globals.user_is_local(user_id) {
|
||||
return Ok(get_local_profile_field(services, user_id, field).await);
|
||||
}
|
||||
|
||||
// Otherwise ask their homeserver
|
||||
let Ok(response) = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
user_id.server_name(),
|
||||
assign!(federation::query::get_profile_information::v1::Request::new(user_id.to_owned()), {
|
||||
field: Some(field.clone())
|
||||
}),
|
||||
)
|
||||
.await
|
||||
else {
|
||||
return Err!(Request(NotFound(
|
||||
"User's homeserver could not provide this profile field."
|
||||
)));
|
||||
};
|
||||
|
||||
if let Some(value) = response.get(field.as_str()).map(ToOwned::to_owned) {
|
||||
if let Ok(value) = ProfileFieldValue::new(field.as_str(), value) {
|
||||
let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value.clone()))
|
||||
.await;
|
||||
|
||||
Ok(Some(value))
|
||||
} else {
|
||||
Err!(Request(Unknown(
|
||||
"User's homeserver returned malformed data for this profile field."
|
||||
)))
|
||||
}
|
||||
} else {
|
||||
let _ = set_profile_field(services, user_id, ProfileFieldChange::Delete(field)).await;
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn get_local_profile(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
) -> BTreeMap<String, Value> {
|
||||
let mut profile = BTreeMap::new();
|
||||
|
||||
// Get displayname and avatar_url independently because `all_profile_keys`
|
||||
// doesn't include them
|
||||
for field in [ProfileFieldName::AvatarUrl, ProfileFieldName::DisplayName] {
|
||||
let key = field.as_str().to_owned();
|
||||
|
||||
if let Some(value) = get_local_profile_field(services, user_id, field).await {
|
||||
profile.insert(key, value.value().into_owned());
|
||||
}
|
||||
}
|
||||
|
||||
// Insert all other profile fields
|
||||
let mut all_fields = services.users.all_profile_keys(user_id);
|
||||
|
||||
while let Some((key, value)) = all_fields.next().await {
|
||||
profile.insert(key, value);
|
||||
}
|
||||
|
||||
profile
|
||||
}
|
||||
|
||||
pub(crate) async fn get_local_profile_field(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
field: ProfileFieldName,
|
||||
) -> Option<ProfileFieldValue> {
|
||||
let value = match field.clone() {
|
||||
| ProfileFieldName::AvatarUrl => services
|
||||
.users
|
||||
.avatar_url(user_id)
|
||||
.await
|
||||
{
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
services.users.create(&body.user_id, None, None).await?;
|
||||
}
|
||||
.ok()
|
||||
.map(to_value)
|
||||
.transpose()
|
||||
.expect("converting avatar url to value should succeed"),
|
||||
| ProfileFieldName::DisplayName => services
|
||||
.users
|
||||
.displayname(user_id)
|
||||
.await
|
||||
.ok()
|
||||
.map(to_value)
|
||||
.transpose()
|
||||
.expect("converting displayname to value should succeed"),
|
||||
| other => services
|
||||
.users
|
||||
.profile_key(user_id, other.as_str())
|
||||
.await
|
||||
.ok(),
|
||||
}?;
|
||||
|
||||
services
|
||||
.users
|
||||
.set_displayname(&body.user_id, response.displayname.clone());
|
||||
services
|
||||
.users
|
||||
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
||||
services
|
||||
.users
|
||||
.set_blurhash(&body.user_id, response.blurhash.clone());
|
||||
Some(
|
||||
ProfileFieldValue::new(field.as_str(), value)
|
||||
.expect("local profile field should be valid"),
|
||||
)
|
||||
}
|
||||
|
||||
for (profile_key, profile_key_value) in &response.custom_profile_fields {
|
||||
services.users.set_profile_key(
|
||||
&body.user_id,
|
||||
profile_key,
|
||||
Some(profile_key_value.clone()),
|
||||
enum ProfileFieldChange {
|
||||
Set(ProfileFieldValue),
|
||||
Delete(ProfileFieldName),
|
||||
}
|
||||
|
||||
impl ProfileFieldChange {
|
||||
fn field_name(&self) -> ProfileFieldName {
|
||||
match self {
|
||||
| &Self::Delete(ref name) => name.clone(),
|
||||
| &Self::Set(ref value) => value.field_name(),
|
||||
}
|
||||
}
|
||||
|
||||
fn value(&self) -> Option<Value> {
|
||||
if let Self::Set(value) = self {
|
||||
Some(value.value().into_owned())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn set_profile_field(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
change: ProfileFieldChange,
|
||||
) -> Result<()> {
|
||||
const MAX_KEY_LENGTH_BYTES: usize = 255;
|
||||
const MAX_PROFILE_LENGTH_BYTES: usize = 65536;
|
||||
|
||||
let field_name = change.field_name();
|
||||
|
||||
// TODO: The spec mentions special error codes (M_PROFILE_TOO_LARGE,
|
||||
// M_KEY_TOO_LARGE) for profile field size limits, but they're not in its list
|
||||
// of error codes and Ruma doesn't have them. Should we return those, or is
|
||||
// M_TOO_LARGE okay?
|
||||
if field_name.as_str().len() > MAX_KEY_LENGTH_BYTES {
|
||||
return Err!(Request(TooLarge(
|
||||
"Individual profile keys must not exceed {MAX_KEY_LENGTH_BYTES} bytes in length."
|
||||
)));
|
||||
}
|
||||
|
||||
// Serialize the entire profile as canonical JSON, including the new change,
|
||||
// to check if it exceeds 64 KiB
|
||||
{
|
||||
let mut full_profile = get_local_profile(services, user_id).await;
|
||||
|
||||
match &change {
|
||||
| ProfileFieldChange::Set(value) => {
|
||||
full_profile.insert(
|
||||
value.field_name().as_str().to_owned(),
|
||||
value.value().clone().into_owned(),
|
||||
);
|
||||
},
|
||||
| ProfileFieldChange::Delete(key) => {
|
||||
full_profile.remove(key.as_str());
|
||||
},
|
||||
}
|
||||
|
||||
if let Ok(canonical_profile) = to_canonical_object(full_profile) {
|
||||
if serde_json::to_string(&canonical_profile)
|
||||
.expect("should be able to serialize to string")
|
||||
.len() > MAX_PROFILE_LENGTH_BYTES
|
||||
{
|
||||
return Err!(
|
||||
"Profile data must not exceed {MAX_PROFILE_LENGTH_BYTES} bytes in length."
|
||||
);
|
||||
}
|
||||
|
||||
return Ok(get_profile::v3::Response {
|
||||
displayname: response.displayname,
|
||||
avatar_url: response.avatar_url,
|
||||
blurhash: response.blurhash,
|
||||
custom_profile_fields: response.custom_profile_fields,
|
||||
});
|
||||
} else {
|
||||
return Err!(Request(BadJson("Failed to canonicalize profile.")));
|
||||
}
|
||||
}
|
||||
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||
// federation
|
||||
return Err!(Request(NotFound("Profile was not found.")));
|
||||
match change {
|
||||
| ProfileFieldChange::Set(ProfileFieldValue::DisplayName(displayname)) => {
|
||||
services
|
||||
.users
|
||||
.set_displayname(user_id, Some(displayname).filter(|dn| !dn.is_empty()));
|
||||
},
|
||||
| ProfileFieldChange::Set(ProfileFieldValue::AvatarUrl(avatar_url)) => {
|
||||
services
|
||||
.users
|
||||
.set_avatar_url(user_id, Some(avatar_url).filter(|av| av.is_valid()));
|
||||
},
|
||||
| ProfileFieldChange::Delete(ProfileFieldName::DisplayName) => {
|
||||
services.users.set_displayname(user_id, None);
|
||||
},
|
||||
| ProfileFieldChange::Delete(ProfileFieldName::AvatarUrl) => {
|
||||
services.users.set_avatar_url(user_id, None);
|
||||
},
|
||||
| other =>
|
||||
services
|
||||
.users
|
||||
.set_profile_key(user_id, other.field_name().as_str(), other.value()),
|
||||
}
|
||||
|
||||
let (avatar_url, blurhash, displayname, custom_profile_fields) = join4(
|
||||
services.users.avatar_url(&body.user_id).ok(),
|
||||
services.users.blurhash(&body.user_id).ok(),
|
||||
services.users.displayname(&body.user_id).ok(),
|
||||
services.users.all_profile_keys(&body.user_id).collect(),
|
||||
)
|
||||
.await;
|
||||
// If the user is local and changed their displayname or avatar_url, update it
|
||||
// in all their joined rooms
|
||||
if matches!(field_name, ProfileFieldName::AvatarUrl | ProfileFieldName::DisplayName)
|
||||
&& services.users.is_active_local(user_id).await
|
||||
{
|
||||
let displayname = services.users.displayname(user_id).await.ok();
|
||||
let avatar_url = services.users.avatar_url(user_id).await.ok();
|
||||
let membership_content = assign!(
|
||||
RoomMemberEventContent::new(MembershipState::Join), { displayname, avatar_url }
|
||||
);
|
||||
|
||||
Ok(get_profile::v3::Response {
|
||||
avatar_url,
|
||||
blurhash,
|
||||
displayname,
|
||||
custom_profile_fields,
|
||||
})
|
||||
}
|
||||
let mut all_joined_rooms = services.rooms.state_cache.rooms_joined(user_id);
|
||||
|
||||
pub async fn update_displayname(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
displayname: Option<String>,
|
||||
all_joined_rooms: &[OwnedRoomId],
|
||||
) {
|
||||
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
||||
services.users.avatar_url(user_id).ok(),
|
||||
services.users.blurhash(user_id).ok(),
|
||||
services.users.displayname(user_id).ok(),
|
||||
)
|
||||
.await;
|
||||
while let Some(room_id) = all_joined_rooms.next().await {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
|
||||
|
||||
if displayname == current_displayname {
|
||||
return;
|
||||
}
|
||||
let _ = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PartialPdu::state(user_id.to_string(), &membership_content),
|
||||
user_id,
|
||||
Some(&room_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
services.users.set_displayname(user_id, displayname.clone());
|
||||
|
||||
// Send a new join membership event into all joined rooms
|
||||
let avatar_url = ¤t_avatar_url;
|
||||
let blurhash = ¤t_blurhash;
|
||||
let displayname = &displayname;
|
||||
let all_joined_rooms: Vec<_> = all_joined_rooms
|
||||
.iter()
|
||||
.try_stream()
|
||||
.and_then(|room_id: &OwnedRoomId| async move {
|
||||
let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
displayname: displayname.clone(),
|
||||
membership: MembershipState::Join,
|
||||
avatar_url: avatar_url.clone(),
|
||||
blurhash: blurhash.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
reason: None,
|
||||
is_direct: None,
|
||||
third_party_invite: None,
|
||||
redact_events: None,
|
||||
});
|
||||
|
||||
Ok((pdu, room_id))
|
||||
})
|
||||
.ignore_err()
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
update_all_rooms(services, all_joined_rooms, user_id)
|
||||
.boxed()
|
||||
.await;
|
||||
}
|
||||
|
||||
pub async fn update_avatar_url(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
avatar_url: Option<OwnedMxcUri>,
|
||||
blurhash: Option<String>,
|
||||
all_joined_rooms: &[OwnedRoomId],
|
||||
) {
|
||||
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
||||
services.users.avatar_url(user_id).ok(),
|
||||
services.users.blurhash(user_id).ok(),
|
||||
services.users.displayname(user_id).ok(),
|
||||
)
|
||||
.await;
|
||||
|
||||
if current_avatar_url == avatar_url && current_blurhash == blurhash {
|
||||
return;
|
||||
}
|
||||
|
||||
services.users.set_avatar_url(user_id, avatar_url.clone());
|
||||
services.users.set_blurhash(user_id, blurhash.clone());
|
||||
|
||||
// Send a new join membership event into all joined rooms
|
||||
let avatar_url = &avatar_url;
|
||||
let blurhash = &blurhash;
|
||||
let displayname = ¤t_displayname;
|
||||
let all_joined_rooms: Vec<_> = all_joined_rooms
|
||||
.iter()
|
||||
.try_stream()
|
||||
.and_then(|room_id: &OwnedRoomId| async move {
|
||||
let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
avatar_url: avatar_url.clone(),
|
||||
blurhash: blurhash.clone(),
|
||||
membership: MembershipState::Join,
|
||||
displayname: displayname.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
reason: None,
|
||||
is_direct: None,
|
||||
third_party_invite: None,
|
||||
redact_events: None,
|
||||
});
|
||||
|
||||
Ok((pdu, room_id))
|
||||
})
|
||||
.ignore_err()
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
update_all_rooms(services, all_joined_rooms, user_id)
|
||||
.boxed()
|
||||
.await;
|
||||
}
|
||||
|
||||
pub async fn update_all_rooms(
|
||||
services: &Services,
|
||||
all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>,
|
||||
user_id: &UserId,
|
||||
) {
|
||||
for (pdu_builder, room_id) in all_joined_rooms {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
if let Err(e) = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
|
||||
.await
|
||||
{
|
||||
warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");
|
||||
if services.config.allow_local_presence {
|
||||
// Send a presence EDU to indicate the profile changed
|
||||
let _ = services
|
||||
.presence
|
||||
.ping_presence(user_id, &PresenceState::Online)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
+36
-42
@@ -3,13 +3,13 @@
|
||||
use conduwuit_service::Services;
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue,
|
||||
api::client::{
|
||||
error::ErrorKind,
|
||||
push::{
|
||||
api::{
|
||||
client::push::{
|
||||
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions,
|
||||
get_pushrule_enabled, get_pushrules_all, get_pushrules_global_scope, set_pusher,
|
||||
set_pushrule, set_pushrule_actions, set_pushrule_enabled,
|
||||
},
|
||||
error::ErrorKind,
|
||||
},
|
||||
events::{
|
||||
GlobalAccountDataEventType,
|
||||
@@ -80,9 +80,7 @@ pub(crate) async fn get_pushrules_all_route(
|
||||
global_ruleset.update_with_server_default(Ruleset::server_default(sender_user));
|
||||
|
||||
let ty = GlobalAccountDataEventType::PushRules;
|
||||
let event = PushRulesEvent {
|
||||
content: PushRulesEventContent { global: global_ruleset.clone() },
|
||||
};
|
||||
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
|
||||
|
||||
services
|
||||
.account_data
|
||||
@@ -91,7 +89,7 @@ pub(crate) async fn get_pushrules_all_route(
|
||||
}
|
||||
};
|
||||
|
||||
Ok(get_pushrules_all::v3::Response { global: global_ruleset })
|
||||
Ok(get_pushrules_all::v3::Response::new(global_ruleset))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/pushrules/global/`
|
||||
@@ -116,21 +114,20 @@ pub(crate) async fn get_pushrules_global_route(
|
||||
// user somehow has non-existent push rule event. recreate it and return server
|
||||
// default silently
|
||||
|
||||
let ty = GlobalAccountDataEventType::PushRules;
|
||||
let event = PushRulesEvent {
|
||||
content: PushRulesEventContent {
|
||||
global: Ruleset::server_default(sender_user),
|
||||
},
|
||||
};
|
||||
let global_ruleset = Ruleset::server_default(sender_user);
|
||||
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
|
||||
|
||||
services
|
||||
.account_data
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
|
||||
.update(
|
||||
None,
|
||||
sender_user,
|
||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||
&serde_json::to_value(event)?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(get_pushrules_global_scope::v3::Response {
|
||||
global: Ruleset::server_default(sender_user),
|
||||
});
|
||||
return Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset));
|
||||
};
|
||||
|
||||
let account_data_content =
|
||||
@@ -173,16 +170,16 @@ pub(crate) async fn get_pushrules_global_route(
|
||||
None,
|
||||
sender_user,
|
||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||
&serde_json::to_value(PushRulesEvent {
|
||||
content: PushRulesEventContent { global: global_ruleset.clone() },
|
||||
})
|
||||
&serde_json::to_value(PushRulesEvent::new(PushRulesEventContent::new(
|
||||
global_ruleset.clone(),
|
||||
)))
|
||||
.expect("to json always works"),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
};
|
||||
|
||||
Ok(get_pushrules_global_scope::v3::Response { global: global_ruleset })
|
||||
Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset))
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||
@@ -216,7 +213,7 @@ pub(crate) async fn get_pushrule_route(
|
||||
.map(Into::into);
|
||||
|
||||
if let Some(rule) = rule {
|
||||
Ok(get_pushrule::v3::Response { rule })
|
||||
Ok(get_pushrule::v3::Response::new(rule))
|
||||
} else {
|
||||
Err!(Request(NotFound("Push rule not found.")))
|
||||
}
|
||||
@@ -275,7 +272,7 @@ pub(crate) async fn set_pushrule_route(
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
||||
.await?;
|
||||
|
||||
Ok(set_pushrule::v3::Response {})
|
||||
Ok(set_pushrule::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
|
||||
@@ -309,7 +306,7 @@ pub(crate) async fn get_pushrule_actions_route(
|
||||
.map(|rule| rule.actions().to_owned())
|
||||
.ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;
|
||||
|
||||
Ok(get_pushrule_actions::v3::Response { actions })
|
||||
Ok(get_pushrule_actions::v3::Response::new(actions))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
|
||||
@@ -342,7 +339,7 @@ pub(crate) async fn set_pushrule_actions_route(
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
||||
.await?;
|
||||
|
||||
Ok(set_pushrule_actions::v3::Response {})
|
||||
Ok(set_pushrule_actions::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
|
||||
@@ -360,7 +357,7 @@ pub(crate) async fn get_pushrule_enabled_route(
|
||||
|| body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str()
|
||||
|| body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str()
|
||||
{
|
||||
return Ok(get_pushrule_enabled::v3::Response { enabled: false });
|
||||
return Ok(get_pushrule_enabled::v3::Response::new(false));
|
||||
}
|
||||
|
||||
let event: PushRulesEvent = services
|
||||
@@ -376,7 +373,7 @@ pub(crate) async fn get_pushrule_enabled_route(
|
||||
.map(ruma::push::AnyPushRuleRef::enabled)
|
||||
.ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;
|
||||
|
||||
Ok(get_pushrule_enabled::v3::Response { enabled })
|
||||
Ok(get_pushrule_enabled::v3::Response::new(enabled))
|
||||
}
|
||||
|
||||
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
|
||||
@@ -409,7 +406,7 @@ pub(crate) async fn set_pushrule_enabled_route(
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
||||
.await?;
|
||||
|
||||
Ok(set_pushrule_enabled::v3::Response {})
|
||||
Ok(set_pushrule_enabled::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
|
||||
@@ -451,7 +448,7 @@ pub(crate) async fn delete_pushrule_route(
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
||||
.await?;
|
||||
|
||||
Ok(delete_pushrule::v3::Response {})
|
||||
Ok(delete_pushrule::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/r0/pushers`
|
||||
@@ -463,9 +460,7 @@ pub(crate) async fn get_pushers_route(
|
||||
) -> Result<get_pushers::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
Ok(get_pushers::v3::Response {
|
||||
pushers: services.pusher.get_pushers(sender_user).await,
|
||||
})
|
||||
Ok(get_pushers::v3::Response::new(services.pusher.get_pushers(sender_user).await))
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/pushers/set`
|
||||
@@ -493,19 +488,18 @@ pub async fn recreate_push_rules_and_return(
|
||||
services: &Services,
|
||||
sender_user: &ruma::UserId,
|
||||
) -> Result<get_pushrules_all::v3::Response> {
|
||||
let ty = GlobalAccountDataEventType::PushRules;
|
||||
let event = PushRulesEvent {
|
||||
content: PushRulesEventContent {
|
||||
global: Ruleset::server_default(sender_user),
|
||||
},
|
||||
};
|
||||
let global_ruleset = Ruleset::server_default(sender_user);
|
||||
let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));
|
||||
|
||||
services
|
||||
.account_data
|
||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
|
||||
.update(
|
||||
None,
|
||||
sender_user,
|
||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||
&serde_json::to_value(event)?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(get_pushrules_all::v3::Response {
|
||||
global: Ruleset::server_default(sender_user),
|
||||
})
|
||||
Ok(get_pushrules_all::v3::Response::new(global_ruleset))
|
||||
}
|
||||
|
||||
@@ -8,7 +8,8 @@
|
||||
api::client::{read_marker::set_read_marker, receipt::create_receipt},
|
||||
events::{
|
||||
RoomAccountDataEventType,
|
||||
receipt::{ReceiptThread, ReceiptType},
|
||||
fully_read::{FullyReadEvent, FullyReadEventContent},
|
||||
receipt::{Receipt, ReceiptEvent, ReceiptEventContent, ReceiptType},
|
||||
},
|
||||
};
|
||||
|
||||
@@ -28,9 +29,7 @@ pub(crate) async fn set_read_marker_route(
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
if let Some(event) = &body.fully_read {
|
||||
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
||||
content: ruma::events::fully_read::FullyReadEventContent { event_id: event.clone() },
|
||||
};
|
||||
let fully_read_event = FullyReadEvent::new(FullyReadEventContent::new(event.to_owned()));
|
||||
|
||||
services
|
||||
.account_data
|
||||
@@ -62,19 +61,16 @@ pub(crate) async fn set_read_marker_route(
|
||||
if services.config.allow_local_read_receipts
|
||||
&& !services.users.is_suspended(sender_user).await?
|
||||
{
|
||||
let receipt_content = BTreeMap::from_iter([(
|
||||
let receipt_content = [(
|
||||
event.to_owned(),
|
||||
BTreeMap::from_iter([(
|
||||
ReceiptType::Read,
|
||||
BTreeMap::from_iter([(
|
||||
sender_user.to_owned(),
|
||||
ruma::events::receipt::Receipt {
|
||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||
thread: ReceiptThread::Unthreaded,
|
||||
},
|
||||
Receipt::new(MilliSecondsSinceUnixEpoch::now()),
|
||||
)]),
|
||||
)]),
|
||||
)]);
|
||||
)];
|
||||
|
||||
services
|
||||
.rooms
|
||||
@@ -82,10 +78,10 @@ pub(crate) async fn set_read_marker_route(
|
||||
.readreceipt_update(
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&ruma::events::receipt::ReceiptEvent {
|
||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||
room_id: body.room_id.clone(),
|
||||
},
|
||||
&ReceiptEvent::new(
|
||||
body.room_id.clone(),
|
||||
ReceiptEventContent::from_iter(receipt_content),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
@@ -111,7 +107,7 @@ pub(crate) async fn set_read_marker_route(
|
||||
.private_read_set(&body.room_id, sender_user, count);
|
||||
}
|
||||
|
||||
Ok(set_read_marker::v3::Response {})
|
||||
Ok(set_read_marker::v3::Response::new())
|
||||
}
|
||||
|
||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
||||
@@ -148,11 +144,8 @@ pub(crate) async fn create_receipt_route(
|
||||
|
||||
match body.receipt_type {
|
||||
| create_receipt::v3::ReceiptType::FullyRead => {
|
||||
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
||||
content: ruma::events::fully_read::FullyReadEventContent {
|
||||
event_id: body.event_id.clone(),
|
||||
},
|
||||
};
|
||||
let fully_read_event =
|
||||
FullyReadEvent::new(FullyReadEventContent::new(body.event_id.clone()));
|
||||
services
|
||||
.account_data
|
||||
.update(
|
||||
@@ -164,19 +157,16 @@ pub(crate) async fn create_receipt_route(
|
||||
.await?;
|
||||
},
|
||||
| create_receipt::v3::ReceiptType::Read => {
|
||||
let receipt_content = BTreeMap::from_iter([(
|
||||
let receipt_content = [(
|
||||
body.event_id.clone(),
|
||||
BTreeMap::from_iter([(
|
||||
ReceiptType::Read,
|
||||
BTreeMap::from_iter([(
|
||||
sender_user.to_owned(),
|
||||
ruma::events::receipt::Receipt {
|
||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||
thread: ReceiptThread::Unthreaded,
|
||||
},
|
||||
Receipt::new(MilliSecondsSinceUnixEpoch::now()),
|
||||
)]),
|
||||
)]),
|
||||
)]);
|
||||
)];
|
||||
|
||||
services
|
||||
.rooms
|
||||
@@ -184,10 +174,10 @@ pub(crate) async fn create_receipt_route(
|
||||
.readreceipt_update(
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&ruma::events::receipt::ReceiptEvent {
|
||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||
room_id: body.room_id.clone(),
|
||||
},
|
||||
&ReceiptEvent::new(
|
||||
body.room_id.clone(),
|
||||
ReceiptEventContent::from_iter(receipt_content),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
},
|
||||
@@ -218,5 +208,5 @@ pub(crate) async fn create_receipt_route(
|
||||
},
|
||||
}
|
||||
|
||||
Ok(create_receipt::v3::Response {})
|
||||
Ok(create_receipt::v3::Response::new())
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::ClientIp;
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
|
||||
use ruma::{
|
||||
api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
|
||||
api::client::redact::redact_event, assign, events::room::redaction::RoomRedactionEventContent,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
@@ -28,18 +28,19 @@ pub(crate) async fn redact_event_route(
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
||||
let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
|
||||
|
||||
let event_id = services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
PartialPdu {
|
||||
redacts: Some(body.event_id.clone()),
|
||||
..PduBuilder::timeline(&RoomRedactionEventContent {
|
||||
redacts: Some(body.event_id.clone()),
|
||||
reason: body.reason.clone(),
|
||||
})
|
||||
..PartialPdu::timeline(
|
||||
&assign!(RoomRedactionEventContent::new_v11(body.event_id.clone()), {
|
||||
reason: body.reason.clone()
|
||||
}),
|
||||
)
|
||||
},
|
||||
sender_user,
|
||||
Some(&body.room_id),
|
||||
@@ -49,5 +50,5 @@ pub(crate) async fn redact_event_route(
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
Ok(redact_event::v3::Response { event_id })
|
||||
Ok(redact_event::v3::Response::new(event_id))
|
||||
}
|
||||
|
||||
+15
-13
@@ -15,6 +15,7 @@
|
||||
get_relating_events_with_rel_type_and_event_type,
|
||||
},
|
||||
},
|
||||
assign,
|
||||
events::{TimelineEventType, relation::RelationType},
|
||||
};
|
||||
|
||||
@@ -39,11 +40,12 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||
body.dir,
|
||||
)
|
||||
.await
|
||||
.map(|res| get_relating_events_with_rel_type_and_event_type::v1::Response {
|
||||
chunk: res.chunk,
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
.map(|res| {
|
||||
assign!(get_relating_events_with_rel_type_and_event_type::v1::Response::new(res.chunk), {
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -66,11 +68,12 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
|
||||
body.dir,
|
||||
)
|
||||
.await
|
||||
.map(|res| get_relating_events_with_rel_type::v1::Response {
|
||||
chunk: res.chunk,
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
.map(|res| {
|
||||
assign!(get_relating_events_with_rel_type::v1::Response::new(res.chunk), {
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -201,12 +204,11 @@ async fn paginate_relations_with_filter(
|
||||
.map(Event::into_format)
|
||||
.collect();
|
||||
|
||||
Ok(get_relating_events::v1::Response {
|
||||
Ok(assign!(get_relating_events::v1::Response::new(chunk), {
|
||||
next_batch,
|
||||
prev_batch: from.map(Into::into),
|
||||
recursion_depth: recurse.then_some(depth.into()),
|
||||
chunk,
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
async fn visibility_filter<Pdu: Event + Send + Sync>(
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user