Compare commits

...

84 Commits

Author SHA1 Message Date
Jacob Taylor 94271e9e43 feat: Delete blurhash 2026-05-13 07:44:19 -07:00
Jacob Taylor f5f262506f feat: Merge nex/feat/rejected-events 2026-05-13 07:44:19 -07:00
Jacob Taylor 9ee5a57e0f chore: Tame clippy 2026-05-13 07:44:19 -07:00
Jacob Taylor ce55274fac feat: Merge ginger/oauth 2026-05-13 07:44:19 -07:00
Jacob Taylor cadf17475e fix: Clippy allows 2026-05-13 07:44:19 -07:00
Jacob Taylor 273d629e12 chore: Clippy fixes 2026-05-13 07:44:19 -07:00
Jacob Taylor 24edb0703a fix: Clear All Clippy Lints 2026-05-13 07:44:19 -07:00
Jacob Taylor f93ebac607 fix: Pre-Commit Lint Compliance Maneuver 2026-05-13 07:44:19 -07:00
timedout 1b5987cdc4 fix: Write-lock individual rooms when building sync for them 2026-05-13 07:44:19 -07:00
Jacob Taylor a6c8996e92 fix warn 2026-05-13 07:44:19 -07:00
Joe Citrine 09d8aab93b fix(resolver): port parser does not handle leading ':'
conditionally trim the leading colon
2026-05-13 07:44:19 -07:00
Jacob Taylor 98f889a9da upgrade some logs to info 2026-05-13 07:44:19 -07:00
Jade Ellis a26ea2ab7a fix: Use to_str methods on room IDs 2026-05-13 07:44:19 -07:00
Jade Ellis 2223b51b02 chore: cleanup 2026-05-13 07:44:19 -07:00
Jade Ellis f096a338a3 chore: Fix more complicated clippy warnings 2026-05-13 07:44:18 -07:00
Jade Ellis eac4b8a16a feat: Add command to purge sync tokens for empty rooms 2026-05-13 07:44:18 -07:00
Jade Ellis 1051568789 feat: Add admin command to delete sync tokens from a room 2026-05-13 07:44:18 -07:00
Jacob Taylor d39e4b6a21 exponential backoff is now just bees. did you want bees? no? well you have them now. congrats 2026-05-13 07:44:18 -07:00
Jacob Taylor 72fcc7bf61 sender_workers scaling. this time, with feeling! 2026-05-13 07:44:18 -07:00
Jacob Taylor 58c662f1d3 log when there are zero extremities 2026-05-13 07:44:18 -07:00
Jacob Taylor d60bdeee53 enable converged 6g at the edge in continuwuity 2026-05-13 07:44:18 -07:00
Jacob Taylor 9a934ac039 bump the number of allowed immutable memtables by 1, to allow for greater flood protection
this should probably not be applied if you have rocksdb_atomic_flush = false (the default)
2026-05-13 07:44:18 -07:00
Ginger 6f83925a4f fix: Use correct service name in membership service 2026-05-13 08:53:15 -04:00
31a05b9c e349dd284f chore: changelog 2026-05-12 14:18:35 +00:00
31a05b9c c57fe66d8d fix: query account-data account-data-get output 2026-05-12 14:18:35 +00:00
Ginger ff28fd0927 fix: Disable debugging parameter in Askama template 2026-05-12 14:17:23 +00:00
Ginger 7307f2dc80 fix: Remove deprecated MatrixRTC focus config option 2026-05-12 14:17:23 +00:00
Renovate Bot 6f56b665e7 chore(deps): update ruma digest to 9c9dccc 2026-05-12 14:17:23 +00:00
Renovate Bot 7018ce4180 chore(deps): update node-patch-updates to v2.0.11 2026-05-12 05:02:17 +00:00
timedout 10dd8bebfe fix: Don't advertise stable MSC2666
Turns out ruma doesn't have the stable definition yet, will need a version bump.

Reverts 088fa3e725
2026-05-11 23:27:00 +01:00
Henry-Hiles 1658b3bf6c chore: Add changelog 2026-05-10 03:48:25 +00:00
Henry-Hiles 088fa3e725 fix: Use stable ID for Mutual Rooms support 2026-05-10 03:48:25 +00:00
Henry-Hiles 4694186c97 fix: Link to community guidelines in CoC file 2026-05-09 16:53:03 -04:00
Renovate Bot a5c61d5137 chore(deps): update pre-commit hook crate-ci/typos to v1.46.1 2026-05-09 05:03:19 +00:00
Ginger 39a882c4a1 chore: Clippy fixes 2026-05-08 12:41:57 -04:00
Ginger f091d3a732 fix: Correctly check for local users' existence 2026-05-08 11:48:20 -04:00
nex ebf9a08cd1 fix: Correct typo that prevented state compressor service being loaded 2026-05-08 03:10:28 +00:00
timedout 4fef0a7ff2 chore: Publish admin announcement 2026-05-08 00:20:23 +01:00
timedout 2f37b446bc chore: Bump version 2026-05-07 22:27:44 +01:00
timedout 6185841b6a fix: Restore event auth check 4 in v12 rooms
Reviewed-By: Jacob Taylor <jacob@explodie.org>
2026-05-07 21:10:32 +01:00
Renovate Bot 3e0d4b066e chore(deps): update dependency cargo-bins/cargo-binstall to v1.19.1 2026-05-07 16:10:45 +00:00
Ginger 0d2eeed567 refactor: Move room joining logic into a new service 2026-05-06 14:01:50 -04:00
31a05b9c b296720540 chore: alter wording 2026-05-06 17:25:45 +00:00
31a05b9c d600aed8db chore: changelog 2026-05-06 17:25:45 +00:00
31a05b9c 9724953b5e feat: admin commands for mass-rejecting invites 2026-05-06 17:25:45 +00:00
stratself 1605176956 docs(perf): Improve introduction wording 2026-05-06 09:03:14 +00:00
stratself 2b0aedf5fd chore: Fix changelog newline 2026-05-06 09:03:14 +00:00
stratself c78c431703 fix(docs): Small wording fixes in perf tuning page 2026-05-06 09:03:13 +00:00
stratself 49b48b857d docs(perf): Change compression warning to SHOULD and better notes on
trusted_servers verification
2026-05-06 09:03:13 +00:00
stratself bf1e42b225 docs(perf): Remove section on .well-known
It is not a clear performance gain, and should be added
later in delegation.mdx
2026-05-06 09:03:13 +00:00
stratself ec76a234db docs(perf): Various changes from feedback
* Only recommend turning off all presences
* Add example trusted_servers and notice to vet them
* Add explained benefits of HTTP/3
* Some grammar nits
2026-05-06 09:03:13 +00:00
stratself 091514e9f9 docs(perf): Grammar/wording edits from feedback
Also combined the introductory paragraphs into one
2026-05-06 09:03:13 +00:00
stratself 789ad499f7 fix: Grammar + wording for perftuning page from feedback 2026-05-06 09:03:13 +00:00
stratself 1e6eaa4337 fix(docs): Remove sysctl and limits.conf recs from perf tuning 2026-05-06 09:03:13 +00:00
stratself de97900b07 chore(docs): Add performance tuning navigation 2026-05-06 09:03:13 +00:00
stratself cb68a3d0ae docs(perf): Rewrite notary tuning stuff and add section on HTTP/3
Some more wording fixes too
2026-05-06 09:03:12 +00:00
stratself d3852abe51 docs(perf): Add .well-known, nofiles, and sysctl recs + some copyedits 2026-05-06 09:03:12 +00:00
stratself 15845b1c55 chore: Add changelog for #1498 2026-05-06 09:03:12 +00:00
stratself f7d558baa6 docs(perf-tuning): Add notaries batch size advice + copyedits from comments
performance.md is now performance.mdx
2026-05-06 09:03:11 +00:00
stratself edd80b2600 docs(perf-tuning): Add bottommost compression section and other changes
* docs: Move typing/readmarks disabling section up, and separate
  outbound sending disablement from total disablement of features
* docs: Change introduction tone to "get more out of your server",
  remove wrong notion of tuned for small instances
2026-05-06 09:03:11 +00:00
stratself 03eab32c27 docs: Add initial performance tuning guidance 2026-05-06 09:03:10 +00:00
Ian Wagner 636de8a708 docs: Add link to matrix.org federation tester 2026-05-06 07:30:27 +00:00
Ginger e212c91ebf fix: Address review comments 2026-05-05 13:35:35 -04:00
Ginger 83f3314f08 chore: News fragment 2026-05-05 09:10:51 -04:00
Ginger 8c2cf67783 refactor: Remove support for guest user registration 2026-05-05 09:09:48 -04:00
Ginger 7436e2f4e1 chore: Update admin command docs 2026-05-05 09:05:43 -04:00
timedout 9ba406761b chore: Regenerate example config 2026-05-04 21:14:22 +01:00
nex 97f49d6357 chore: Remove news fragment checkbox from PR template
Requiring them is now handled by an action
2026-05-04 20:06:40 +00:00
new-years-eve 1a49bc6f87 docs: Add changelog 2026-05-04 20:05:26 +00:00
new-years-eve 833216256b feat: Add support for fallback keys
Fallback keys can be provided by client devices to be used in case the
supply of one-time keys runs out. The server will store one fallback key
per user, per device, per algorithm. The server will keep track of
whether this fallback key has been used or not.

The /keys/claim endpoint now provides a fallback key
if no one-time key is found

The /keys/upload endpoint now accepts fallback keys

The /sync endpoint now informs the client of the algorithms for which it
has an unused fallback key in stock.
2026-05-04 20:05:26 +00:00
new-years-eve 5fa3087401 feat: Implement serialization/deserialization for booleans 2026-05-04 20:05:26 +00:00
Ginger e95c0bd53f chore: News fragment 2026-05-04 11:28:11 -04:00
Ginger 52d1ed24a9 refactor: Remove LDAP support 2026-05-04 11:27:47 -04:00
timedout 4c1638e495 style: Resolve end-of-line lint failure 2026-05-04 14:57:21 +01:00
grgergo 3f69cf8ed7 chore: update example config 2026-05-04 14:57:21 +01:00
grgergo 560a615c29 chore: news fragment for #1706 2026-05-04 14:57:21 +01:00
grgergo 2e19310a87 clarify max_request_size limiting federation
Signed-off-by: grgergo <csakbek@freemail.hu>
2026-05-04 14:57:21 +01:00
Renovate Bot 81c5c6b2bc chore(deps): update sentry-rust monorepo to 0.48.0 2026-05-03 14:41:05 +00:00
Renovate Bot 73d8462ace chore(deps): update rust crate askama to 0.16.0 2026-05-03 14:40:44 +00:00
Renovate Bot 8b5fda1fb5 chore(deps): lock file maintenance 2026-05-03 14:40:16 +00:00
Ginger 6f9b4a989e fix: Update ctor macro arguments 2026-05-03 14:39:30 +00:00
Renovate Bot fe0d83d447 chore(deps): update rust crate ctor to 0.13.0 2026-05-03 14:39:30 +00:00
Renovate Bot 37dccdbeb0 chore(deps): update https://github.com/taiki-e/install-action digest to b5fddbb 2026-05-03 12:30:32 +00:00
Renovate Bot 1060adc670 chore(deps): update dependency cargo-bins/cargo-binstall to v1.19.0 2026-05-03 05:04:01 +00:00
198 changed files with 8563 additions and 4393 deletions
+1 -1
View File
@@ -71,7 +71,7 @@ runs:
- name: Install timelord-cli and git-warp-time
if: steps.check-binaries.outputs.need-install == 'true'
uses: https://github.com/taiki-e/install-action@787505cde8a44ea468a00478fe52baf23b15bccd # v2
uses: https://github.com/taiki-e/install-action@b5fddbb5361bce8a06fb168c9d403a6cc552b084 # v2
with:
tool: git-warp-time,timelord-cli@3.0.1
-2
View File
@@ -45,7 +45,6 @@
- [ ] I have [tested my contribution][c1t] (or proof-read it for documentation-only changes)
myself, if applicable. This includes ensuring code compiles.
- [ ] My commit messages follow the [commit message format][c1cm] and are descriptive.
- [ ] I have written a [news fragment][n1] for this PR, if applicable<!--(can be done after hitting open!)-->.
<!--
Notes on these requirements:
@@ -79,4 +78,3 @@
[c1pc]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#pre-commit-checks
[c1t]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#running-tests-locally
[c1cm]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CONTRIBUTING.md#commit-messages
[n1]: https://towncrier.readthedocs.io/en/stable/tutorial.html#creating-news-fragments
+1 -1
View File
@@ -24,7 +24,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.46.0
rev: v1.46.1
hooks:
- id: typos
- id: typos
+1 -1
View File
@@ -1 +1 @@
Contributors are expected to follow the [Continuwuity Community Guidelines](continuwuity.org/community/guidelines).
Contributors are expected to follow the [Continuwuity Community Guidelines](https://continuwuity.org/community/guidelines).
Generated
+276 -756
View File
File diff suppressed because it is too large Load Diff
+14 -22
View File
@@ -12,7 +12,7 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
version = "0.5.8"
version = "0.5.9"
[workspace.metadata.crane]
name = "conduwuit"
@@ -39,7 +39,10 @@ features = ["ffi", "std", "union"]
version = "1.1.0"
[workspace.dependencies.ctor]
version = "0.10.0"
version = "0.13.0"
[workspace.dependencies.dtor]
version = "0.13.0"
[workspace.dependencies.cargo_toml]
version = "0.22"
@@ -177,7 +180,7 @@ version = "0.5.3"
features = ["alloc", "rand"]
default-features = false
# Used to generate thumbnails for images & blurhashes
# Used to generate thumbnails for images
[workspace.dependencies.image]
version = "0.25.5"
default-features = false
@@ -188,14 +191,6 @@ features = [
"webp",
]
[workspace.dependencies.blurhash]
version = "0.2.3"
default-features = false
features = [
"fast-linear-to-srgb",
"image",
]
# logging
[workspace.dependencies.log]
version = "0.4.27"
@@ -349,7 +344,7 @@ version = "1.1.1"
[workspace.dependencies.ruma]
# version = "0.14.1"
git = "https://github.com/ruma/ruma.git"
rev = "5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
rev = "9c9dccc93f054bbd28f23f630223fffa6289ecbc"
features = [
"appservice-api-c",
"client-api",
@@ -361,7 +356,6 @@ features = [
"ring-compat",
"compat-upload-signatures",
"compat-optional-txn-pdus",
"unstable-msc2448",
"unstable-msc2666",
"unstable-msc2867",
"unstable-msc2870",
@@ -430,7 +424,7 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.47.0"
version = "0.48.0"
default-features = false
features = [
"backtrace",
@@ -445,9 +439,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.47.0"
version = "0.48.0"
[workspace.dependencies.sentry-tower]
version = "0.47.0"
version = "0.48.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -546,16 +540,11 @@ features = ["std"]
[workspace.dependencies.maplit]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.12.0"
default-features = false
features = ["sync", "tls-rustls", "rustls-provider"]
[workspace.dependencies.yansi]
version = "1.0.1"
[workspace.dependencies.askama]
version = "0.15.0"
version = "0.16.0"
[workspace.dependencies.lettre]
version = "0.11.19"
@@ -570,6 +559,9 @@ features = ["std"]
[workspace.dependencies.nonzero_ext]
version = "0.3.0"
[workspace.dependencies.serde_urlencoded]
version = "0.7.1"
#
# Patches
#
+1
View File
@@ -0,0 +1 @@
Users may now be forbidden from deactivating their own accounts with the new `allow_deactivation` config option.
+1
View File
@@ -0,0 +1 @@
Added support for authenticating clients using the new OAuth 2.0 login API. Contributed by @ginger.
+1
View File
@@ -0,0 +1 @@
Removed support for guest user registration, a little-used and deprecated approach to room previews.
+1
View File
@@ -0,0 +1 @@
The deprecated `well_known.rtc_focus_server_urls` config option has been removed. MatrixRTC foci should be configured using the `matrix_rtc.foci` config option.
+1
View File
@@ -0,0 +1 @@
Support for server-side blurhashing (part of MSC2448) has been removed.
+1
View File
@@ -0,0 +1 @@
Add performance tuning documentation. Contributed by @stratself.
+1
View File
@@ -0,0 +1 @@
Removed support for LDAP.
+1
View File
@@ -0,0 +1 @@
Clarified in the config that `max_request_size` affects federated media as well.
+1
View File
@@ -0,0 +1 @@
Added support for fallback encryption keys.
+1
View File
@@ -0,0 +1 @@
Add `!admin users reject-all-invites` to clean invite spam
+1
View File
@@ -0,0 +1 @@
fix `!admin query account-data account-data-get` not returning the content
+9
View File
@@ -0,0 +1,9 @@
Implemented event rejection, which should resolve and prevent future netsplits of the kinds observed
within some Continuwuity rooms.
Also resolved several bugs related to both soft-failing events, and event backfilling, which should
improve state resolution stability.
The `!admin debug get-pdu` command was updated to disambiguate event acceptance status, and
`!admin debug show-auth-chain` was added to visually display event auth chains, which may assist
developers in debugging strangely complex events.
Contributed by @nex.
+1
View File
@@ -0,0 +1 @@
Fixed an issue where Continuwuity would only advertise support for the unstable endpoint for Mutual Rooms (MSC2666), despite only supporting the stable endpoint. Contributed by @Henry-Hiles (QuadRadical)
-2
View File
@@ -7,7 +7,6 @@
[global]
address = "0.0.0.0"
allow_device_name_federation = true
allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_registration = true
database_path = "/database"
@@ -32,7 +31,6 @@ rocksdb_log_level = "info"
rocksdb_max_log_files = 1
rocksdb_recovery_mode = 0
rocksdb_paranoid_file_checks = true
log_guest_registrations = false
allow_legacy_media = true
startup_netburst = true
startup_netburst_keep = -1
+40 -152
View File
@@ -291,6 +291,7 @@
#ip_lookup_strategy = 5
# Max request size for file uploads in bytes. Defaults to 20MB.
# Also limits incoming federated media.
#
#max_request_size = 20971520
@@ -523,17 +524,15 @@
#
#recaptcha_private_site_key =
# Policy documents, such as terms and conditions or a privacy policy,
# which users must agree to when registering an account.
# Controls whether users are allowed to deactivate their own accounts
# through the account management panel or their Matrix clients. Server
# admins can always deactivate users using the relevant admin commands.
#
# Example:
# ```ignore
# [global.registration_terms.privacy_policy]
# en = { name = "Privacy Policy", url = "https://homeserver.example/en/privacy_policy.html" }
# es = { name = "Política de Privacidad", url = "https://homeserver.example/es/privacy_policy.html" }
# ```
# Note that, in some jurisdictions, you may be legally required to honor
# users who request to deactivate their accounts if you set this option
# to `false`.
#
#registration_terms = {}
#allow_deactivation = true
# Controls whether encrypted rooms and events are allowed.
#
@@ -1270,21 +1269,6 @@
#
#brotli_compression = false
# Set to true to allow user type "guest" registrations. Some clients like
# Element attempt to register guest users automatically.
#
#allow_guest_registration = false
# Set to true to log guest registrations in the admin room. Note that
# these may be noisy or unnecessary if you're a public homeserver.
#
#log_guest_registrations = false
# Set to true to allow guest registrations/users to auto join any rooms
# specified in `auto_join_rooms`.
#
#allow_guests_auto_join_rooms = false
# Enable the legacy unauthenticated Matrix media repository endpoints.
# These endpoints consist of:
# - /_matrix/media/*/config
@@ -1801,11 +1785,9 @@
#stream_amplification = 1024
# Number of sender task workers; determines sender parallelism. Default is
# '0' which means the value is determined internally, likely matching the
# number of tokio worker-threads or number of cores, etc. Override by
# setting a non-zero value.
# core count. Override by setting a different value.
#
#sender_workers = 0
#sender_workers = core count
# Enables listener sockets; can be set to false to disable listening. This
# option is intended for developer/diagnostic purposes only.
@@ -1888,34 +1870,6 @@
#
#support_pgp_key =
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
#
# A list of MatrixRTC foci URLs which will be served as part of the
# MSC4143 client endpoint at /.well-known/matrix/client.
#
# This option is deprecated and will be removed in a future release.
# Please migrate to the new `[global.matrix_rtc]` config section.
#
#rtc_focus_server_urls = []
[global.blurhashing]
# blurhashing x component, 4 is recommended by https://blurha.sh/
#
#components_x = 4
# blurhashing y component, 3 is recommended by https://blurha.sh/
#
#components_y = 3
# Max raw size that the server will blurhash, this is the size of the
# image after converting it to raw data, it should be higher than the
# upload limit but not too high. The higher it is the higher the
# potential load will be for clients requesting blurhashes. The default
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.matrix_rtc]
# A list of MatrixRTC foci (transports) which will be served via the
@@ -1933,102 +1887,6 @@
#
#foci = []
[global.ldap]
# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false
# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false
# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""
# StartTLS for LDAP connections.
#
#use_starttls = false
# Skip TLS certificate verification, possibly dangerous.
#
#disable_tls_verification = false
# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""
# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In such case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of a LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""
# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""
# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"
# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"
# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"
# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""
# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""
#[global.antispam]
#[global.antispam.meowlnir]
@@ -2109,3 +1967,33 @@
# provide an email address.
#
#require_email_for_token_registration = false
#[global.registration-terms]
# The language code to provide to clients along with the policy documents.
#
#language = "en"
# Policy documents, such as terms and conditions or a privacy policy,
# which users must agree to when registering an account.
#
# Example:
# ```ignore
# [global.registration_terms.documents]
# privacy_policy = { name = "Privacy Policy", url = "https://homeserver.example/en/privacy_policy.html" }
# ```
#
#documents = {}
#[global.oauth]
# The compatibility mode to use for OAuth.
#
# - "disabled": OAuth will be unavailable. Users will only be able to log
# in using legacy authentication.
# - "hybrid": OAuth and legacy authentication will both be available. Some
# clients may only use one or the other.
# - "exclusive": Only OAuth will be available. Clients which require
# legacy authentication will be unable to log in.
#
#compatibility_mode = "hybrid"
+1 -1
View File
@@ -50,7 +50,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.18.1
ENV BINSTALL_VERSION=1.19.1
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
+1 -1
View File
@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.18.1
ENV BINSTALL_VERSION=1.19.1
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
+5
View File
@@ -8,6 +8,11 @@
"type": "file",
"name": "dns",
"label": "DNS tuning (recommended)"
},
{
"type": "file",
"name": "performance",
"label": "Performance tuning"
}
]
+4 -2
View File
@@ -156,9 +156,11 @@ ### Serving well-known files manually
## Troubleshooting
Check with the [Matrix Connectivity Tester][federation-tester] to see that it's working.
Check that other servers can connect to you.
Here are some tools that can help identify federation issues:
[federation-tester]: https://federationtester.mtrnord.blog/
- [Matrix Connectivity Tester](https://federationtester.mtrnord.blog/)
- [Matrix Federation Tester](https://federationtester.matrix.org/)
### Cannot log in with web clients
+135
View File
@@ -0,0 +1,135 @@
# Performance tuning
Continuwuity's default configs are suited for many typical setups and scale appropriately with the size of your hardware. However, there are many scenarios where additional modifications can be made to better utilize your server resources.
This page aims to outline various performance tweaks for Continuwuity and their effects. These adjustments are especially helpful for homeservers that join many large federated rooms or have many users, and they will become increasingly necessary as the Matrix network expands. As always, your mileage may vary according to your setup's specifics. If you have further discussions or recommendations, please share them in the community rooms.
## DNS tuning (recommended)
Please see the dedicated [DNS tuning guide](./dns.mdx).
## Cache capacities
If you have memory to spare, consider increasing the `cache_capacity_modifier` value to a larger number to allow more data to be stored in hot memory. This *significantly* speeds up many intensive operations (such as state resolutions) and decreases CPU usage and disk I/O. Start with a baseline of `cache_capacity_modifier = 2.0` and tune up until you are satisfied with RAM usage.
On the other hand, if your system doesn't have a lot of RAM, consider decreasing the cache capacity modifier to something smaller than `1.0` to avoid low-memory issues (at the cost of higher load on disk/CPU). This recommendation also works if your system has abnormally little RAM compared to the number of CPU cores (for example, 2GB RAM for 12 cores), as cache capacities scale according to the number of available cores.
## Disabling some features
You can disable outgoing **typing notifications** and **read markers** to reduce strain on the CPU and network when actively participating in rooms.
```toml
# disables sending read receipts
allow_outgoing_read_receipts = false
# disables sending typing notifications
allow_outgoing_typing = false
```
Outgoing presence updates are also considered very expensive and have been disabled by default (`allow_outgoing_presence = false`). For more savings, you may wish to disable _all_ processing of presence entirely.
```toml title=continuwuity.toml
# disabling presence updates entirely
allow_local_presence = false
allow_incoming_presence = false
allow_outgoing_presence = false
```
## Tuning database compression
:::warning
These steps SHOULD be done **before** starting Continuwuity for the first time. While switching database compression midway through is theoretically possible, this has not been tested extensively in the wild.
:::
### Changing the compression algorithm
For reduced CPU usage at a tradeoff of increased storage space, consider deploying Continuwuity with the faster and less intensive `lz4` algorithm instead of `zstd` for rocksdb, and disable WAL compression entirely:
```toml
### in continuwuity.toml ###
rocksdb_compression_algo = "lz4"
rocksdb_wal_compression = "none"
```
This tweak can especially be helpful if you have an older or less performant CPU (e.g. a Raspberry Pi) and disk space to spare.
### Increasing bottommost layer compression (`zstd` only)
The bottommost layer of the database usually contains old and read-only data, so it is a suitable place for further compression. In Continuwuity, this is possible by setting `rocksdb_bottommost_compression = true` and tuning `rocksdb_bottommost_compression_level` to a more compact level than the default one used in `rocksdb_compression_level`. This tweak comes at a cost of increased CPU usage, but may prevent your database from growing too large in the long run.
For those using `zstd` compression, the compression level ranges from 1 to 22. An example like this could apply:
```toml
### in continuwuity.toml ###
rocksdb_compression_algo = "zstd"
rocksdb_compression_level = 32767 # magic number, translates to level 3 on zstd
rocksdb_bottommost_compression = true
rocksdb_bottommost_compression_level = 9 # level 9 on zstd
```
For `lz4` users, the default level (`-1`) is already the most compact. You can only further decrease it to favor compression speed over ratio.
Consult these documents for more information on compression tuning and levels:
- [Rocksdb compression documentation][rocksdb-compression]
- [Rocksdb default compression levels][rocksdb-compression-defaults]
- [Zstd manual][zstd-manual]
- [Lz4 manual][lz4-manual]
[rocksdb-compression]: https://github.com/facebook/rocksdb/wiki/Compression
[rocksdb-compression-defaults]: https://github.com/facebook/rocksdb/blob/main/include/rocksdb/options.h#L208-L217
[zstd-manual]: https://facebook.github.io/zstd/zstd_manual.html
[lz4-manual]: https://github.com/lz4/lz4/blob/release/doc/lz4_manual.html
## Other tweaks
### Using UNIX sockets
If your homeserver and reverse proxy live on the same machine, you may wish to expose Continuwuity on a UNIX socket instead of a port. This removes TCP overhead between the two programs.
<details>
<summary>Example config with Caddy</summary>
```toml
### in continuwuity.toml ###
# `address` and `port` has to be commented out first
#address = ["127.0.0.1", "::1"]
#port = 8008
unix_socket_path = "/run/continuwuity/continuwuity.sock"
```
```
### in your Caddyfile ###
https://matrix.example.com {
reverse_proxy unix//run/continuwuity/continuwuity.sock
# alternatively, use the http2-plaintext protocol
# reverse_proxy unix+h2c//run/continuwuity/continuwuity.sock
}
```
</details>
### Tuning your trusted servers
:::info Vet your trusted servers!
Trusted servers are your first point of contact when obtaining public keys from other servers, and they could theoretically impersonate other servers and cause significant harm to your deployment. Please thoroughly verify your trusted servers' credibility before adding them to your configuration.
:::
Trusted servers are queried sequentially in the order they are listed. If you have multiple trusted servers configured, put the faster ones first:
```toml
# Example config, using maintainers' recommended homeservers
trusted_servers = ["codestorm.net","starstruck.systems","unredacted.org","matrix.org"]
```
Avoid prioritising `matrix.org` as your primary trusted server, as it tends to be quite slow.
Some users have also reported that increasing `trusted_server_batch_size` has helped with faster joins for huge rooms. Start with doubling the default to `2048` until you find a suitable value.
### Enable HTTP/3 on your reverse proxy
Consider enabling the newer **HTTP/3** protocol for inbound connections to Continuwuity. In Caddy, HTTP/3 is allowed by default, but you must expose port :443/**udp** on your firewall.
HTTP/3 can vastly improve Client-Server connections especially on unstable networks, as it reduces packet losses and latency from TCP head-of-line blocking, includes workarounds for network switching, and reduces connection establishment handshakes. Continuwuity also includes experimental _outbound_ HTTP/3 support in its Docker images, so connections between Continuwuity servers can benefit from this too.
+7 -3
View File
@@ -268,9 +268,13 @@ ## Starting Your Server
## How do I know it works?
To check if your server can communicate with other homeservers, use the
[Matrix Federation Tester](https://federationtester.mtrnord.blog/). If you can
register your account but cannot join federated rooms, check your configuration
To check if your server can communicate with other homeservers,
use an external testing tool:
- [Matrix Connectivity Tester](https://federationtester.mtrnord.blog/)
- [Matrix Federation Tester](https://federationtester.matrix.org/)
If you can register your account but cannot join federated rooms, check your configuration
and verify that your federation endpoints are opened and forwarded correctly.
As a quick health check, you can also use these cURL commands:
@@ -6,10 +6,10 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 12,
"mention_room": false,
"date": "2026-04-24",
"message": "[v0.5.8](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.8) is out! This is a patch release which fixes a bug in 0.5.7's email support -- upgrade soon if you use that feature."
"id": 13,
"mention_room": true,
"date": "2026-05-08",
"message": "[v0.5.9](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.9) has been released, fixing a few low-severity federation-related vulnerabilities. It is recommended you read the changelog and update as soon as possible. There are no new features or other changes in this release, only related bugfixes. Deployments tracking the main branch should also update to the latest commit."
}
]
}
+1 -1
View File
@@ -7,7 +7,7 @@ ## Running commands
* All commands listed here may be used by server administrators in the admin room by sending them as messages.
* If the `admin_escape_commands` configuration option is enabled, server administrators may run certain commands in public rooms by prefixing them with a single backslash. These commands will only run on _their_ homeserver, even if they are a member of another homeserver's admin room. Some sensitive commands cannot be used outside the admin room and will return an error.
* All commands listed here may be used in the server's console, if it is enabled. Commands entered in the console do not require the `!admin` prefix. If Continuwuity is deployed via Docker, be sure to set the appropriate options detailed in [the Docker deployment guide](../../deploying/docker.mdx#accessing-the-servers-console) to enable access to the server's console.
* All commands listed here may be used in the server's console, if it is enabled. Commands entered in the console do not require the `!admin` prefix.
## Categories
+1 -1
View File
@@ -146,7 +146,7 @@ cargo clippy \
--locked \
--profile test \
--no-default-features \
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression \
--color=always \
-- \
-D warnings
Generated
+15 -15
View File
@@ -3,11 +3,11 @@
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1775907537,
"narHash": "sha256-vbeLNgmsx1Z6TwnlDV0dKyeBCcon3UpkV9yLr/yc6HM=",
"lastModified": 1777645914,
"narHash": "sha256-P1T7QVQS13OvkXEuEhI91CLaQfyv6iqV9vW8IBLLDYg=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "d99f7b9eb81731bddebf80a355f8be7b2f8b1b28",
"rev": "d6ba1f7070ba91f45efe372d68eb648be67d0417",
"type": "github"
},
"original": {
@@ -18,11 +18,11 @@
},
"crane": {
"locked": {
"lastModified": 1775839657,
"narHash": "sha256-SPm9ck7jh3Un9nwPuMGbRU04UroFmOHjLP56T10MOeM=",
"lastModified": 1777335812,
"narHash": "sha256-bEg5xoAxAwsyfnGhkEX7RJViTIBIYPd8ISg4O1c0HFc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "7cf72d978629469c4bd4206b95c402514c1f6000",
"rev": "5e0fb2f64edff2822249f21293b8304dedaaf676",
"type": "github"
},
"original": {
@@ -39,11 +39,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1775891769,
"narHash": "sha256-EOfVlTKw2n8w1uhfh46GS4hEGnQ7oWrIWQfIY6utIkI=",
"lastModified": 1777624102,
"narHash": "sha256-thSyElkje577x/kAbP72nHlfiFc1a+tCudskLPHXe9s=",
"owner": "nix-community",
"repo": "fenix",
"rev": "6fbc54dde15aee725bdc7aae5e478849685d5f56",
"rev": "4d81601e0b73f20d81d066754ad0e7d1e7f75a06",
"type": "github"
},
"original": {
@@ -89,11 +89,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1775710090,
"narHash": "sha256-ar3rofg+awPB8QXDaFJhJ2jJhu+KqN/PRCXeyuXR76E=",
"lastModified": 1777268161,
"narHash": "sha256-bxrdOn8SCOv8tN4JbTF/TXq7kjo9ag4M+C8yzzIRYbE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "4c1018dae018162ec878d42fec712642d214fdfa",
"rev": "1c3fe55ad329cbcb28471bb30f05c9827f724c76",
"type": "github"
},
"original": {
@@ -132,11 +132,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1775843361,
"narHash": "sha256-j53ZgyDvmYf3Sjh1IPvvTjqa614qUfVQSzj59+MpzkY=",
"lastModified": 1777583169,
"narHash": "sha256-dVJ4+wrRKc8oIgp3rLOFSq1obt/sCKlXy3h47qof/w0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "9eb97ea96d8400e8957ddd56702e962614296583",
"rev": "aa64e4828a2bbba44463c1229a81c748d3cce583",
"type": "github"
},
"original": {
+104 -104
View File
@@ -125,13 +125,13 @@
}
},
"node_modules/@rsbuild/core": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.3.tgz",
"integrity": "sha512-2myp7jUgGen50saxW8OJD/eMVKp7HnuBN5MUzwRb6mDbRZZVpoorfI4LQqiGSBNjGLB6jltvx/R2yHmcmnchwg==",
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.5.tgz",
"integrity": "sha512-KajO50hbXb32S8MsyDh2f+xKcVeRy9Gfzdcy0JjpMLj22djHugly6jrGo7jH7ls9X6/TDcyCTncSuNK4+D2lTw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/core": "~2.0.1",
"@rspack/core": "~2.0.2",
"@swc/helpers": "^0.5.21"
},
"bin": {
@@ -169,28 +169,28 @@
}
},
"node_modules/@rspack/binding": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.1.tgz",
"integrity": "sha512-ynV1gw4KqFtQ0P+ZZh76SUj49wBb2FuHW3zSmHverHWuxBhzvrZS6/dZ+fCFQG8bTTPtrPz0RQUTN3uEDbPVBQ==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.2.tgz",
"integrity": "sha512-0kZPplW9GWx8mfC6DfsaRY3QBIYPuUs42JfmSM6aSb8tMHZAXQeLeMB8M+h8i4SeI+aFtCgO6UuYGtyWf7+L+A==",
"dev": true,
"license": "MIT",
"optionalDependencies": {
"@rspack/binding-darwin-arm64": "2.0.1",
"@rspack/binding-darwin-x64": "2.0.1",
"@rspack/binding-linux-arm64-gnu": "2.0.1",
"@rspack/binding-linux-arm64-musl": "2.0.1",
"@rspack/binding-linux-x64-gnu": "2.0.1",
"@rspack/binding-linux-x64-musl": "2.0.1",
"@rspack/binding-wasm32-wasi": "2.0.1",
"@rspack/binding-win32-arm64-msvc": "2.0.1",
"@rspack/binding-win32-ia32-msvc": "2.0.1",
"@rspack/binding-win32-x64-msvc": "2.0.1"
"@rspack/binding-darwin-arm64": "2.0.2",
"@rspack/binding-darwin-x64": "2.0.2",
"@rspack/binding-linux-arm64-gnu": "2.0.2",
"@rspack/binding-linux-arm64-musl": "2.0.2",
"@rspack/binding-linux-x64-gnu": "2.0.2",
"@rspack/binding-linux-x64-musl": "2.0.2",
"@rspack/binding-wasm32-wasi": "2.0.2",
"@rspack/binding-win32-arm64-msvc": "2.0.2",
"@rspack/binding-win32-ia32-msvc": "2.0.2",
"@rspack/binding-win32-x64-msvc": "2.0.2"
}
},
"node_modules/@rspack/binding-darwin-arm64": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.1.tgz",
"integrity": "sha512-CGFO5zmajD1Itch1lxAI7+gvKiagzyqXopHv/jHG9Su2WWQ2/Nhn2/rkSpdp6ptE9ri6+6tCOOahf099/v/Xog==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.2.tgz",
"integrity": "sha512-0o7lbgBBsDlICWdjIH0q3e0BsSco4GRiImHWVfZSVEG+q2+ykZJvSvYCVhPM1Co375Z0S3VMPa/8SjcY1FHwlw==",
"cpu": [
"arm64"
],
@@ -202,9 +202,9 @@
]
},
"node_modules/@rspack/binding-darwin-x64": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.1.tgz",
"integrity": "sha512-2vvBNBoS09/PurupBwSrlTZd8283o00B8v20ncsNUdEff41uCR/hzIrYoTIVWnVST+Gt5O1+cfcfORp397lajg==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.2.tgz",
"integrity": "sha512-tOwxZpoPlTlRs/w6UyUinXJ4TYRVHMlR7+eQxO1R3muKpixvhXQjtvoaY16HuFyTVky5F0IfOoWr3x9FEsgdLg==",
"cpu": [
"x64"
],
@@ -216,9 +216,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-gnu": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.1.tgz",
"integrity": "sha512-uvNXk6ahE3AH3h2avnd1Mgno68YQpS4cfX1OkOGWIC/roL+NrOP2XVXV4yfVAoydPALDO7AfbIfN0QdmBK3rsA==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.2.tgz",
"integrity": "sha512-1ZD4YFhG1rmgqj+W8hfwHyKV8xDxGsc/3KgU0FwmiVEX7JfzhCkgBO/xlCG79kRKSrzuVzt4icO/G3cCKn0pag==",
"cpu": [
"arm64"
],
@@ -233,9 +233,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-musl": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.1.tgz",
"integrity": "sha512-S/a6uN9PiZ5O/PjSqyIXhuRC1lVzeJkJV69NeLk5sIEUiDQ/aQGZG97uN+tluwpbo1tPbLJkdHYETfjspOX4Pg==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.2.tgz",
"integrity": "sha512-/PtTkM/DsDLjeuXTmeJeRfbjCDbcL9jvoVgZrgxYFZ28y2cdLvbChbW9uigOzs5dQEs1CIBQXMTTj7KhdBTuQg==",
"cpu": [
"arm64"
],
@@ -250,9 +250,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-gnu": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.1.tgz",
"integrity": "sha512-C13Kk0OkZiocZVj187Sf753UH6pDXnuEu6vzUvi3qv9ltibG1ki0H2Y8isXBYL2cHQOV+hk0g1S6/4z3TTB97A==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.2.tgz",
"integrity": "sha512-bBjsZxMHRaPo6X9SokApm6ucs+UhXtAJFyJJyuk2BH4XJsLeCU9Dz1vMwioeohFbJUUeTASVPm6/BL+RhSaunw==",
"cpu": [
"x64"
],
@@ -267,9 +267,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-musl": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.1.tgz",
"integrity": "sha512-TQsiBFpEDGkuvK9tNdGj/Uc+AIytzqhxXH/1jKU6M24cWB1DTw/Cx7DdrkCBDyq3129K3POLdujvbWCGqBzQUw==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.2.tgz",
"integrity": "sha512-HjlpInqzabDNkhVsUJpsHPqa9QYVWBViJoyWNjzXCAW0vKMDvwaphyUvokSinX8FGTlZi/sr5UEaHJo6XtQ35g==",
"cpu": [
"x64"
],
@@ -284,9 +284,9 @@
]
},
"node_modules/@rspack/binding-wasm32-wasi": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.1.tgz",
"integrity": "sha512-wk3gyUgBW/ayP49bI54bkY8+EQnfBHxdoe9dz3oobSTZQc8AOWwmUUDEPltW8rUvPOM6dfHECTOUMnfaf2f5yA==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.2.tgz",
"integrity": "sha512-YaRYNFLJRpkGfYjSWR7n9f+nQKtrlmrrffpAn/blc2geHcRvXoBc5SCs1idPtsLhj7H9qWWhs7ucjyHy4csWFg==",
"cpu": [
"wasm32"
],
@@ -300,9 +300,9 @@
}
},
"node_modules/@rspack/binding-win32-arm64-msvc": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.1.tgz",
"integrity": "sha512-rHjLcy3VcAC3+x+PxH+gwhwv6tPe0JdXTNT5eAOs9wgZIM6T9p4wre49+K4Qy98+Fb7TTbLX0ObUitlOkGwTSA==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.2.tgz",
"integrity": "sha512-d/3kTEKq+asLjRFPO96t+wfWiM7DLN76VQEPDD9bc1kdsZXlVJBuvyXfsgK8bbEvKplWXYcSsokhmEnuXrLOpg==",
"cpu": [
"arm64"
],
@@ -314,9 +314,9 @@
]
},
"node_modules/@rspack/binding-win32-ia32-msvc": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.1.tgz",
"integrity": "sha512-Ad1vVqMBBnd4T8rsORngu9sl2kyRTlS4kMlvFudjzl1X2UFArEDBe0YVGNN7ZvahM12CErUx2WiN8Sd8pb+qXQ==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.2.tgz",
"integrity": "sha512-161cWineq3RW+Jdm1FAfSpXeUtYWvhB3kAbm46vNT9h/YYz+spwsFMvveAZ1nsVSVL0IC5lDBGUte7yUAY8K2g==",
"cpu": [
"ia32"
],
@@ -328,9 +328,9 @@
]
},
"node_modules/@rspack/binding-win32-x64-msvc": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.1.tgz",
"integrity": "sha512-oPM2Jtm7HOlmxl/aBfleAVlL6t9VeHx6WvEets7BBJMInemFXAQd4CErRqybf7rXutACzLeUWBOue4Jpd1/ykw==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.2.tgz",
"integrity": "sha512-y7Q0S1FE+OlkL5GMqLG0PwxrPw6E1r892KhGrGKE1Vdufe5YTEx6xTPxzZ+b7N2KPD7s9G1/iJmWHQxb1+Bjkg==",
"cpu": [
"x64"
],
@@ -342,13 +342,13 @@
]
},
"node_modules/@rspack/core": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.1.tgz",
"integrity": "sha512-lgfZiExh8kDR/3obgi3RQKwKG5av1Xf5qDN1aVde777W9pbmx0Pqvrww1qtNvJ+gobEjbrrn5HEZWYGe0VLmcA==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.2.tgz",
"integrity": "sha512-VM3UHOo26uC+4QSqY5tU1ybI7KuXY5rTof8nhFOaBY9SYau0Smvr+hMSAPmrmHwknB6dXT8yaNVxrj7I+qxE1Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/binding": "2.0.1"
"@rspack/binding": "2.0.2"
},
"engines": {
"node": "^20.19.0 || >=22.12.0"
@@ -383,20 +383,20 @@
}
},
"node_modules/@rspress/core": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.10.tgz",
"integrity": "sha512-DvoV7YUW538x0CVAGyYPKfjUHgEuq7Z8LZq1cpfUgBpA1DynFUK3Ls6spvdoAHAl3l0AN+xxOHpu/sRVhzqi/A==",
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.11.tgz",
"integrity": "sha512-4YBOFmSMFv5GWrCa80qSIW8VxqZQQS/PknVq2r7Hb7kgfB38Fzciopn3hjb3hNwI4TTRbsi/Jev2HyRWD4bYAQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@mdx-js/mdx": "^3.1.1",
"@mdx-js/react": "^3.1.1",
"@rsbuild/core": "^2.0.2",
"@rsbuild/core": "^2.0.5",
"@rsbuild/plugin-react": "~2.0.0",
"@rspress/shared": "2.0.10",
"@rspress/shared": "2.0.11",
"@shikijs/rehype": "^4.0.2",
"@types/unist": "^3.0.3",
"@unhead/react": "^2.1.13",
"@unhead/react": "^2.1.15",
"body-scroll-lock": "4.0.0-beta.0",
"clsx": "2.1.1",
"copy-to-clipboard": "^3.3.3",
@@ -407,12 +407,12 @@
"mdast-util-mdxjs-esm": "^2.0.1",
"medium-zoom": "1.1.0",
"nprogress": "^0.2.0",
"react": "^19.2.5",
"react-dom": "^19.2.5",
"react": "^19.2.6",
"react-dom": "^19.2.6",
"react-lazy-with-preload": "^2.2.1",
"react-reconciler": "0.33.0",
"react-render-to-markdown": "19.0.1",
"react-router-dom": "^7.13.2",
"react-router-dom": "^7.15.0",
"rehype-external-links": "^3.0.0",
"rehype-raw": "^7.0.0",
"remark-cjk-friendly": "^2.0.1",
@@ -436,9 +436,9 @@
}
},
"node_modules/@rspress/plugin-client-redirects": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.10.tgz",
"integrity": "sha512-ImOm3h/cbXiJXIvpwv3Wn9rM91xgdhKbD2WX+WlMlWO4AtQfKR4XFrVhIZZAkrt09eeotRIklA7nu8Nuzzzbsw==",
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.11.tgz",
"integrity": "sha512-DI9vod5mGccg57c19CuFpN3mGP1FEEueOUnEUz1UHXSyXg9YTj+ox7Xla4jUUzAzoPVGiWSSsfbtCTwdoxAsbg==",
"dev": true,
"license": "MIT",
"engines": {
@@ -449,9 +449,9 @@
}
},
"node_modules/@rspress/plugin-sitemap": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.10.tgz",
"integrity": "sha512-PZLig9+OlnyLcy6x9BlEqWSRef6TzDWB6Dlh2/hY41FtKlhyb7d7U56RGlLselWaQV54SHVa6H/y611A56ZI2g==",
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.11.tgz",
"integrity": "sha512-046LCHgbJXdaPipWB2SWMjZcAtIrOjXGZOD92xlTjhZ74D7Mk1Nod1MQdtOEoISWedcHdgpUVXMDbB1doKBpPQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -462,13 +462,13 @@
}
},
"node_modules/@rspress/shared": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.10.tgz",
"integrity": "sha512-Kx10OAHWqi2jvW7ScmBUbkGjnwv4E6rEoelUchcL8It8nQ4nAVk0xvvES7m64knEon55zDbs8JQumCjbHu801Q==",
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.11.tgz",
"integrity": "sha512-7l5Pso4s597utJyisVEnd7n/40h053nfE8DwGQMeS8RLGtSwVgxFwNHsSrvQEGtFlLrg2aWWSITqnAVO1wfTew==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rsbuild/core": "^2.0.2",
"@rsbuild/core": "^2.0.5",
"@shikijs/rehype": "^4.0.2",
"unified": "^11.0.5"
}
@@ -610,9 +610,9 @@
}
},
"node_modules/@tybys/wasm-util": {
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz",
"integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
"version": "0.10.2",
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.2.tgz",
"integrity": "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==",
"dev": true,
"license": "MIT",
"optional": true,
@@ -707,13 +707,13 @@
"license": "ISC"
},
"node_modules/@unhead/react": {
"version": "2.1.13",
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.13.tgz",
"integrity": "sha512-gC48tNJ0UtbithkiKCc2WUlxbVVk5o171EtruS2w2hQUblfYFHzCPu2hljjT1e0tUHXXqN8EMv7mpxHddMB2sg==",
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.15.tgz",
"integrity": "sha512-5hfAaZ3XJq9JkspRzZdSPsMrXXA8v/SKiEOxZcN9L40o44byF/50bcQuOLgSSCAx8802mI5VG32KZXWTtsLu9Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"unhead": "2.1.13"
"unhead": "2.1.15"
},
"funding": {
"url": "https://github.com/sponsors/harlan-zw"
@@ -1399,9 +1399,9 @@
}
},
"node_modules/hookable": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/hookable/-/hookable-6.1.0.tgz",
"integrity": "sha512-ZoKZSJgu8voGK2geJS+6YtYjvIzu9AOM/KZXsBxr83uhLL++e9pEv/dlgwgy3dvHg06kTz6JOh1hk3C8Ceiymw==",
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/hookable/-/hookable-6.1.1.tgz",
"integrity": "sha512-U9LYDy1CwhMCnprUfeAZWZGByVbhd54hwepegYTK7Pi5NvqEj63ifz5z+xukznehT7i6NIZRu89Ay1AZmRsLEQ==",
"dev": true,
"license": "MIT"
},
@@ -2683,20 +2683,20 @@
"license": "MIT"
},
"node_modules/oniguruma-parser": {
"version": "0.12.1",
"resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz",
"integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==",
"version": "0.12.2",
"resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.2.tgz",
"integrity": "sha512-6HVa5oIrgMC6aA6WF6XyyqbhRPJrKR02L20+2+zpDtO5QAzGHAUGw5TKQvwi5vctNnRHkJYmjAhRVQF2EKdTQw==",
"dev": true,
"license": "MIT"
},
"node_modules/oniguruma-to-es": {
"version": "4.3.5",
"resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.5.tgz",
"integrity": "sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ==",
"version": "4.3.6",
"resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.6.tgz",
"integrity": "sha512-csuQ9x3Yr0cEIs/Zgx/OEt9iBw9vqIunAPQkx19R/fiMq2oGVTgcMqO/V3Ybqefr1TBvosI6jU539ksaBULJyA==",
"dev": true,
"license": "MIT",
"dependencies": {
"oniguruma-parser": "^0.12.1",
"oniguruma-parser": "^0.12.2",
"regex": "^6.1.0",
"regex-recursion": "^6.0.2"
}
@@ -2753,9 +2753,9 @@
}
},
"node_modules/react": {
"version": "19.2.5",
"resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz",
"integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==",
"version": "19.2.6",
"resolved": "https://registry.npmjs.org/react/-/react-19.2.6.tgz",
"integrity": "sha512-sfWGGfavi0xr8Pg0sVsyHMAOziVYKgPLNrS7ig+ivMNb3wbCBw3KxtflsGBAwD3gYQlE/AEZsTLgToRrSCjb0Q==",
"dev": true,
"license": "MIT",
"engines": {
@@ -2763,16 +2763,16 @@
}
},
"node_modules/react-dom": {
"version": "19.2.5",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.5.tgz",
"integrity": "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag==",
"version": "19.2.6",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.6.tgz",
"integrity": "sha512-0prMI+hvBbPjsWnxDLxlCGyM8PN6UuWjEUCYmZhO67xIV9Xasa/r/vDnq+Xyq4Lo27g8QSbO5YzARu0D1Sps3g==",
"dev": true,
"license": "MIT",
"dependencies": {
"scheduler": "^0.27.0"
},
"peerDependencies": {
"react": "^19.2.5"
"react": "^19.2.6"
}
},
"node_modules/react-lazy-with-preload": {
@@ -2822,9 +2822,9 @@
}
},
"node_modules/react-router": {
"version": "7.14.0",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.14.0.tgz",
"integrity": "sha512-m/xR9N4LQLmAS0ZhkY2nkPA1N7gQ5TUVa5n8TgANuDTARbn1gt+zLPXEm7W0XDTbrQ2AJSJKhoa6yx1D8BcpxQ==",
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.15.0.tgz",
"integrity": "sha512-HW9vYwuM8f4yx66Izy8xfrzCM+SBJluoZcCbww9A1TySax11S5Vgw6fi3ZjMONw9J4gQwngL7PzkyIpJJpJ7RQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2845,13 +2845,13 @@
}
},
"node_modules/react-router-dom": {
"version": "7.14.0",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.14.0.tgz",
"integrity": "sha512-2G3ajSVSZMEtmTjIklRWlNvo8wICEpLihfD/0YMDxbWK2UyP5EGfnoIn9AIQGnF3G/FX0MRbHXdFcD+rL1ZreQ==",
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.15.0.tgz",
"integrity": "sha512-VcrVg64Fo8nwBvDscajG8gRTLIuTC6N50nb22l2HOOV4PTOHgoGp8mUjy9wLiHYoYTSYI36tUnXZgasSRFZorQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"react-router": "7.14.0"
"react-router": "7.15.0"
},
"engines": {
"node": ">=20.0.0"
@@ -3290,9 +3290,9 @@
}
},
"node_modules/unhead": {
"version": "2.1.13",
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.13.tgz",
"integrity": "sha512-jO9M1sI6b2h/1KpIu4Jeu+ptumLmUKboRRLxys5pYHFeT+lqTzfNHbYUX9bxVDhC1FBszAGuWcUVlmvIPsah8Q==",
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.15.tgz",
"integrity": "sha512-MCt5T90mCWyr3Z6pUCdM9lVRXoMoVBlL7z7U4CYVIiaDiuzad/UCfLuMqz5MeNmpZUgoBCQnrucJimU7EZR+XA==",
"dev": true,
"license": "MIT",
"dependencies": {
+1
View File
@@ -81,6 +81,7 @@ conduwuit-macros.workspace = true
conduwuit-service.workspace = true
const-str.workspace = true
ctor.workspace = true
dtor.workspace = true
futures.workspace = true
lettre.workspace = true
log.workspace = true
+1 -1
View File
@@ -16,7 +16,7 @@
};
#[derive(Debug, Parser)]
#[command(name = conduwuit_core::name(), version = conduwuit_core::version())]
#[command(name = conduwuit_core::BRANDING, version = conduwuit_core::version())]
pub enum AdminCommand {
#[command(subcommand)]
/// Commands for managing appservices
+213 -9
View File
@@ -1,5 +1,5 @@
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
fmt::Write,
iter::once,
time::{Instant, SystemTime},
@@ -22,7 +22,7 @@
use lettre::message::Mailbox;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, UInt,
api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw,
};
use service::rooms::{
@@ -69,6 +69,189 @@ pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
self.write_str(&out).await
}
/// Rendering classification for a node in the auth-chain mermaid graph.
///
/// The inner `bool` is `true` when the event's PDU is missing from the local
/// database (we saw it referenced as an auth event but could not load it).
#[derive(Clone, Copy, Eq, PartialEq)]
enum NodeStatus {
	/// Event is neither rejected nor soft-failed.
	Normal(bool),
	/// Event was marked soft-failed (per `pdu_metadata.is_event_soft_failed`).
	SoftFailed(bool),
	/// Event was marked rejected (per `pdu_metadata.is_event_rejected`).
	/// Rejection takes precedence over soft-failure when classifying.
	Rejected(bool),
}
/// One auth event discovered while expanding a node of the graph, carrying
/// everything needed to sort it deterministically and render it later.
struct AuthChild {
	/// Mermaid node identifier (e.g. `n3`) assigned to this event.
	node_id: String,
	/// The auth event's ID.
	event_id: OwnedEventId,
	/// PDU depth — primary sort key (see the sort in `show_auth_chain`).
	depth: UInt,
	/// PDU `origin_server_ts` — sort tiebreaker.
	ts: UInt,
	/// `true` the first time this event is scheduled; duplicates are skipped
	/// when pushing onto the traversal stack.
	first_seen: bool,
	/// The PDU itself, if it was found locally; `None` means we can render
	/// the node but cannot traverse its auth events.
	pdu: Option<PduEvent>,
}
/// Emits one mermaid node declaration for `event_id` into `graph`, labelling
/// it according to `status`, and tags rejected / soft-failed nodes with the
/// corresponding `classDef` so they are coloured in the rendered diagram.
fn render_node(
	graph: &mut String,
	node_id: &str,
	event_id: &EventId,
	status: NodeStatus,
) -> Result {
	// Build the human-readable label shown inside the node box: the event ID
	// plus an annotation describing its status.
	let mut label = event_id.to_string();
	match status {
		| NodeStatus::Normal(false) => {},
		| NodeStatus::Normal(true) => label.push_str(" (missing locally)"),
		| NodeStatus::SoftFailed(false) => label.push_str(" (soft-failed)"),
		| NodeStatus::SoftFailed(true) => label.push_str(" (soft-failed & missing locally)"),
		| NodeStatus::Rejected(false) => label.push_str(" (rejected)"),
		| NodeStatus::Rejected(true) => label.push_str(" (rejected & missing locally)"),
	}

	writeln!(graph, "{node_id}[\"{label}\"]")?;

	// Attach the styling class for the two abnormal statuses; normal nodes
	// keep mermaid's default appearance.
	if matches!(status, NodeStatus::Rejected(_)) {
		writeln!(graph, "class {node_id} rejected;")?;
	} else if matches!(status, NodeStatus::SoftFailed(_)) {
		writeln!(graph, "class {node_id} soft_failed;")?;
	}

	Ok(())
}
// Walks the auth chain backwards from `event_id` and prints it as a mermaid
// flowchart, marking rejected / soft-failed / locally-missing events.
// Traversal is an explicit DFS with a stack; `visited` prevents re-expansion,
// `scheduled` deduplicates children, and `cached_events` avoids re-fetching
// PDUs that are referenced by multiple nodes (e.g. the create event).
#[admin_command]
pub(super) async fn show_auth_chain(&self, event_id: OwnedEventId) -> Result {
	// Classify an event for rendering. Rejection takes precedence over
	// soft-failure; `missing` flags that we lack the PDU locally.
	let node_status = async |event_id: &EventId, missing: bool| -> NodeStatus {
		if self
			.services
			.rooms
			.pdu_metadata
			.is_event_rejected(event_id)
			.await
		{
			NodeStatus::Rejected(missing)
		} else if self
			.services
			.rooms
			.pdu_metadata
			.is_event_soft_failed(event_id)
			.await
		{
			NodeStatus::SoftFailed(missing)
		} else {
			NodeStatus::Normal(missing)
		}
	};
	// The root event must exist locally or there is nothing to walk.
	let Ok(root) = self.services.rooms.timeline.get_pdu(&event_id).await else {
		return Err!("Event not found.");
	};
	// Graph header: mermaid fence, flowchart direction, and the classDefs
	// used by `render_node` to colour rejected / soft-failed nodes.
	let mut graph = String::from(
		"```mermaid\n%% This is a mermaid graph. You can plug this output into\n\
		%% https://mermaid.live/edit to visualise it on-the-fly.\nflowchart TD\n\
		classDef rejected fill:#ffe5e5,stroke:#cc0000,stroke-width:2px,color:#000;\n\
		classDef soft_failed fill:#fff6cc,stroke:#c9a400,stroke-width:2px,color:#000;\n"
	);
	// event ID -> mermaid node ID; also serves to keep node IDs stable when
	// the same event is referenced from several parents.
	let mut node_ids: HashMap<OwnedEventId, String> = HashMap::new();
	// PDU cache, seeded with the root so it is never re-fetched.
	let mut cached_events: HashMap<OwnedEventId, PduEvent> =
		HashMap::from([(event_id.clone(), root.clone())]);
	// Events ever queued for processing (deduplication across parents).
	let mut scheduled: HashSet<OwnedEventId> = HashSet::from([event_id.clone()]);
	// Events already expanded.
	let mut visited: HashSet<OwnedEventId> = HashSet::new();
	let mut stack = vec![root];
	let mut next_node_id = 0_usize;
	// Returns the existing node ID for an event, or mints a fresh "n<k>".
	let node_id_for = |event_id: &OwnedEventId,
	                   node_ids: &mut HashMap<OwnedEventId, String>,
	                   next_node_id: &mut usize| {
		node_ids
			.entry(event_id.clone())
			.or_insert_with(|| {
				let id = format!("n{}", *next_node_id);
				*next_node_id = next_node_id.saturating_add(1);
				id
			})
			.clone()
	};
	while let Some(event) = stack.pop() {
		let current_event_id = event.event_id().to_owned();
		// Skip events that were already expanded via another path.
		if !visited.insert(current_event_id.clone()) {
			continue;
		}
		let current_node_id = node_id_for(&current_event_id, &mut node_ids, &mut next_node_id);
		let current_status = node_status(&current_event_id, false).await;
		render_node(&mut graph, &current_node_id, &current_event_id, current_status)?;
		// Collect this event's auth events as children, emitting an edge for
		// each one immediately (edges are drawn even for duplicates).
		let mut children = Vec::with_capacity(event.auth_events.len());
		for auth_event_id in event.auth_events().rev() {
			let auth_event_id = auth_event_id.to_owned();
			let auth_node_id = node_id_for(&auth_event_id, &mut node_ids, &mut next_node_id);
			writeln!(graph, "{current_node_id} --> {auth_node_id}")?;
			let first_seen = scheduled.insert(auth_event_id.clone());
			let auth_pdu = if let Some(auth_pdu) = cached_events.get(&auth_event_id) {
				// Events may be referenced multiple times (e.g. the create
				// event); the cache avoids repeated database lookups.
				Some(auth_pdu.clone())
			} else if first_seen {
				match self.services.rooms.timeline.get_pdu(&auth_event_id).await {
					| Ok(auth_event) => {
						cached_events.insert(auth_event_id.clone(), auth_event.clone());
						Some(auth_event)
					},
					| Err(_) => None,
				}
			} else {
				None
			};
			// NOTE: Depth is used as the primary sorting key here, even though it has no
			// bearing on state resolution or anything. Timestamp is used as a
			// tiebreaker, falling back to lexicographical comparison of event IDs.
			// Missing PDUs sort last (UInt::MAX sentinel keys).
			let (depth, ts) = auth_pdu
				.as_ref()
				.map_or((UInt::MAX, UInt::MAX), |pdu| (pdu.depth, pdu.origin_server_ts));
			children.push(AuthChild {
				node_id: auth_node_id,
				event_id: auth_event_id,
				depth,
				ts,
				first_seen,
				pdu: auth_pdu,
			});
		}
		children.sort_by(|a, b| {
			a.depth
				.cmp(&b.depth)
				.then(a.ts.cmp(&b.ts))
				.then(a.event_id.as_str().cmp(b.event_id.as_str()))
		});
		// Pushed in reverse so the smallest-keyed child is popped first.
		for child in children.into_iter().rev() {
			if !child.first_seen {
				continue;
			}
			if let Some(child_pdu) = child.pdu {
				// We have this PDU so will want to traverse it.
				stack.push(child_pdu);
			} else {
				// We don't have this PDU locally so we can't traverse its auth events,
				// but we can still render it as a node.
				render_node(
					&mut graph,
					&child.node_id,
					&child.event_id,
					node_status(&child.event_id, true).await,
				)?;
			}
		}
	}
	graph.push_str("```\n");
	self.write_str(&graph).await
}
#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result {
if self.body.len() < 2
@@ -111,15 +294,31 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
outlier = true;
pdu_json = self.services.rooms.timeline.get_pdu_json(&event_id).await;
}
let rejected = self
.services
.rooms
.pdu_metadata
.is_event_rejected(&event_id)
.await;
let soft_failed = self
.services
.rooms
.pdu_metadata
.is_event_soft_failed(&event_id)
.await;
match pdu_json {
| Err(_) => return Err!("PDU not found locally."),
| Ok(json) => {
let text = serde_json::to_string_pretty(&json)?;
let msg = if outlier {
"Outlier (Rejected / Soft Failed) PDU found in our database"
let msg = if rejected {
"Rejected PDU:"
} else if soft_failed {
"Soft-failed PDU:"
} else if outlier {
"Outlier PDU:"
} else {
"PDU found in our database"
"PDU:"
};
write!(self, "{msg}\n```json\n{text}\n```")
},
@@ -614,6 +813,10 @@ pub(super) async fn force_set_room_state_from_server(
.await;
state.insert(shortstatekey, pdu.event_id.clone());
self.services
.rooms
.pdu_metadata
.clear_pdu_markers(pdu.event_id());
}
}
@@ -631,6 +834,10 @@ pub(super) async fn force_set_room_state_from_server(
.rooms
.outlier
.add_pdu_outlier(&event_id, &value);
self.services
.rooms
.pdu_metadata
.clear_pdu_markers(&event_id);
}
info!("Resolving new room state");
@@ -662,10 +869,7 @@ pub(super) async fn force_set_room_state_from_server(
.force_state(room_id.clone().as_ref(), short_state_hash, added, removed, &state_lock)
.await?;
info!(
"Updating joined counts for room just in case (e.g. we may have found a difference in \
the room's m.room.member state"
);
info!("Updating joined counts for room");
self.services
.rooms
.state_cache
+10 -1
View File
@@ -17,12 +17,21 @@ pub enum DebugCommand {
message: Vec<String>,
},
/// Get the auth_chain of a PDU
/// Loads the auth_chain of a PDU, reporting how long it took.
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: OwnedEventId,
},
/// Walks & displays the auth_chain of a PDU in a mermaid graph format.
///
/// This is useless to basically anyone but developers, and is also probably
/// slow and memory hungry.
ShowAuthChain {
/// The root event ID to start walking back from.
event_id: OwnedEventId,
},
/// Parse and print a PDU from a JSON
///
/// The PDU event is only checked for validity and is not added to the
-4
View File
@@ -102,10 +102,6 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
);
}
if !self.services.users.exists(&user_id).await {
return Err!("Remote user does not exist in our database.",);
}
let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
.services
.rooms
+13 -3
View File
@@ -1,7 +1,8 @@
use clap::Subcommand;
use conduwuit::Result;
use conduwuit_database::Deserialized as _;
use futures::StreamExt;
use ruma::{OwnedRoomId, OwnedUserId};
use ruma::{OwnedRoomId, OwnedUserId, exports::serde::Serialize};
use crate::{admin_command, admin_command_dispatch};
@@ -58,13 +59,22 @@ async fn account_data_get(
room_id: Option<OwnedRoomId>,
) -> Result {
let timer = tokio::time::Instant::now();
let results = self
let result = self
.services
.account_data
.get_raw(room_id.as_deref(), &user_id, &kind)
.await;
let query_time = timer.elapsed();
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
let json = serde_json::to_string_pretty(&match room_id {
| None => result
.deserialized::<ruma::serde::Raw<ruma::events::AnyGlobalAccountDataEvent>>()?
.serialize(serde_json::value::Serializer)?,
| Some(_) => result
.deserialized::<ruma::serde::Raw<ruma::events::AnyRoomAccountDataEvent>>()?
.serialize(serde_json::value::Serializer)?,
})?;
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{json}\n```"))
.await
}
-14
View File
@@ -15,10 +15,6 @@ pub enum UsersCommand {
IterUsers2,
PasswordHash {
user_id: OwnedUserId,
},
ListDevices {
user_id: OwnedUserId,
},
@@ -235,16 +231,6 @@ async fn count_users(&self) -> Result {
.await
}
#[admin_command]
async fn password_hash(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.users.password_hash(&user_id).await;
let query_time = timer.elapsed();
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn list_devices(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
+183 -1
View File
@@ -1,6 +1,6 @@
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use crate::{PAGE_SIZE, admin_command, get_room_info};
@@ -82,3 +82,185 @@ pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
self.write_str(&format!("{result}")).await
}
/// Admin command: delete every stored sync token for a single room.
///
/// Accepts either a room ID or a room alias; aliases are resolved to a
/// room ID first. Returns a summary line with the number of tokens removed.
#[admin_command]
pub(super) async fn purge_sync_tokens(&self, room: OwnedRoomOrAliasId) -> Result {
	// Resolve the room ID from the room or alias ID
	let room_id = self.services.rooms.alias.resolve(&room).await?;
	// Delete all tokens for this room using the service method
	// (the underlying error is intentionally discarded; only a generic
	// failure message is surfaced to the admin)
	let Ok(deleted_count) = self.services.rooms.user.delete_room_tokens(&room_id).await else {
		return Err!("Failed to delete sync tokens for room {}", room_id.as_str());
	};
	self.write_str(&format!(
		"Successfully deleted {deleted_count} sync tokens for room {}",
		room_id.as_str()
	))
	.await
}
/// Target options for room purging
///
/// Selects which subset of rooms `purge_all_sync_tokens` operates on.
/// Defaults to [`RoomTargetOption::All`] when not specified.
#[derive(Default, Debug, clap::ValueEnum, Clone)]
pub enum RoomTargetOption {
	#[default]
	/// Target all rooms
	All,
	/// Target only disabled rooms
	DisabledOnly,
	/// Target only banned rooms
	BannedOnly,
}
/// Admin command: purge sync tokens across all rooms on the server.
///
/// `target_option` optionally restricts processing to disabled-only or
/// banned-only rooms; `None` (or `All`) processes every room. When
/// `execute` is false this runs as a dry run: tokens are counted via
/// `count_room_tokens` but nothing is deleted. When `execute` is true,
/// tokens are actually removed via `delete_room_tokens`. Per-room errors
/// are counted and logged at debug level rather than aborting the loop.
#[admin_command]
pub(super) async fn purge_all_sync_tokens(
	&self,
	target_option: Option<RoomTargetOption>,
	execute: bool,
) -> Result {
	use conduwuit::{debug, info};
	let mode = if !execute { "Simulating" } else { "Starting" };
	// strictly, we should check if these reach the max value after the loop and
	// warn the user that the count is too large
	let mut total_rooms_checked: usize = 0;
	let mut total_tokens_deleted: usize = 0;
	let mut error_count: u32 = 0;
	let mut skipped_rooms: usize = 0;
	info!("{} purge of sync tokens", mode);
	// Get all rooms in the server
	let all_rooms = self
		.services
		.rooms
		.metadata
		.iter_ids()
		.collect::<Vec<_>>()
		.await;
	info!("Found {} rooms total on the server", all_rooms.len());
	// Filter rooms based on options
	let mut rooms = Vec::new();
	for room_id in all_rooms {
		if let Some(target) = &target_option {
			match target {
				| RoomTargetOption::DisabledOnly => {
					if !self.services.rooms.metadata.is_disabled(&room_id).await {
						debug!("Skipping room {} as it's not disabled", room_id.as_str());
						skipped_rooms = skipped_rooms.saturating_add(1);
						continue;
					}
				},
				| RoomTargetOption::BannedOnly => {
					if !self.services.rooms.metadata.is_banned(&room_id).await {
						debug!("Skipping room {} as it's not banned", room_id.as_str());
						skipped_rooms = skipped_rooms.saturating_add(1);
						continue;
					}
				},
				| RoomTargetOption::All => {},
			}
		}
		rooms.push(room_id);
	}
	// Total number of rooms we'll be checking
	let total_rooms = rooms.len();
	info!(
		"Processing {} rooms after filtering (skipped {} rooms)",
		total_rooms, skipped_rooms
	);
	// Process each room
	for room_id in rooms {
		total_rooms_checked = total_rooms_checked.saturating_add(1);
		// Log progress periodically (every 100 rooms, plus once at the end)
		if total_rooms_checked.is_multiple_of(100) || total_rooms_checked == total_rooms {
			info!(
				"Progress: {}/{} rooms checked, {} tokens {}",
				total_rooms_checked,
				total_rooms,
				total_tokens_deleted,
				if !execute { "would be deleted" } else { "deleted" }
			);
		}
		// In dry run mode, just count what would be deleted, don't actually delete
		debug!(
			"Room {}: {}",
			room_id.as_str(),
			if !execute {
				"would purge sync tokens"
			} else {
				"purging sync tokens"
			}
		);
		if !execute {
			// For dry run mode, count tokens without deleting
			match self.services.rooms.user.count_room_tokens(&room_id).await {
				| Ok(count) =>
					if count > 0 {
						debug!(
							"Would delete {} sync tokens for room {}",
							count,
							room_id.as_str()
						);
						total_tokens_deleted = total_tokens_deleted.saturating_add(count);
					} else {
						debug!("No sync tokens found for room {}", room_id.as_str());
					},
				| Err(e) => {
					// Errors are tallied but do not stop the sweep
					debug!("Error counting sync tokens for room {}: {:?}", room_id.as_str(), e);
					error_count = error_count.saturating_add(1);
				},
			}
		} else {
			// Real deletion mode
			match self.services.rooms.user.delete_room_tokens(&room_id).await {
				| Ok(count) =>
					if count > 0 {
						debug!("Deleted {} sync tokens for room {}", count, room_id.as_str());
						total_tokens_deleted = total_tokens_deleted.saturating_add(count);
					} else {
						debug!("No sync tokens found for room {}", room_id.as_str());
					},
				| Err(e) => {
					// Errors are tallied but do not stop the sweep
					debug!("Error purging sync tokens for room {}: {:?}", room_id.as_str(), e);
					error_count = error_count.saturating_add(1);
				},
			}
		}
	}
	let action = if !execute { "would be deleted" } else { "deleted" };
	// Emit the same summary to the server log and to the admin-room reply
	info!(
		"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
		if !execute {
			"purge simulation"
		} else {
			"purging sync tokens"
		},
		total_rooms_checked,
		total_rooms,
		total_tokens_deleted,
		action,
		error_count
	);
	self.write_str(&format!(
		"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
		if !execute { "simulation" } else { "purging sync tokens" },
		total_rooms_checked,
		total_rooms,
		total_tokens_deleted,
		action,
		error_count
	))
	.await
}
+23 -1
View File
@@ -5,8 +5,9 @@
mod moderation;
use clap::Subcommand;
use commands::RoomTargetOption;
use conduwuit::Result;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use self::{
alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand,
@@ -60,4 +61,25 @@ pub enum RoomCommand {
Exists {
room_id: OwnedRoomId,
},
/// - Delete all sync tokens for a room
PurgeSyncTokens {
/// Room ID or alias to purge sync tokens for
#[arg(value_parser)]
room: OwnedRoomOrAliasId,
},
/// - Delete sync tokens for all rooms that have no local users
///
/// By default, processes all empty rooms.
PurgeAllSyncTokens {
/// Target specific room types
#[arg(long, value_enum)]
target_option: Option<RoomTargetOption>,
/// Execute token deletions. Otherwise,
/// performs a dry run without actually deleting any tokens
#[arg(long)]
execute: bool,
},
}
+25 -2
View File
@@ -30,14 +30,37 @@ pub(super) async fn issue_token(&self, expires: super::TokenExpires) -> Result {
.issue_token(self.sender_or_service_user().into(), expires);
self.write_str(&format!(
"New registration token issued: `{token}`. {}.",
"New registration token issued: `{token}` . {}.",
if let Some(expires) = info.expires {
format!("{expires}")
} else {
"Never expires".to_owned()
}
))
.await
.await?;
if self
.services
.config
.oauth
.compatibility_mode
.oauth_available()
{
self.write_str(&format!(
"\nInvite link using this token: {}",
self.services
.config
.get_client_domain()
.join(&format!(
"{}/account/register/?flow=trusted&token={token}",
conduwuit::ROUTE_PREFIX
))
.unwrap()
))
.await?;
}
Ok(())
}
#[admin_command]
+149 -178
View File
@@ -1,14 +1,10 @@
use std::{
collections::{BTreeMap, HashSet},
fmt::Write as _,
};
use std::collections::{BTreeMap, HashSet};
use api::client::{
full_user_deactivate, join_room_by_id_helper, leave_room, recreate_push_rules_and_return,
remote_leave_room,
full_user_deactivate, leave_room, recreate_push_rules_and_return, remote_leave_room,
};
use conduwuit::{
Err, Result, debug_warn, error, info,
Err, Result, debug_warn, info,
matrix::{Event, pdu::PartialPdu},
utils::{self, ReadyExt},
warn,
@@ -24,6 +20,7 @@
tag::{TagEvent, TagEventContent, TagInfo},
},
};
use service::users::HashedPassword;
use crate::{
admin_command, get_room_info,
@@ -53,128 +50,22 @@ pub(super) async fn list_users(&self) -> Result {
#[admin_command]
pub(super) async fn create_user(&self, username: String, password: Option<String>) -> Result {
// Validate user id
let user_id = parse_local_user_id(self.services, &username)?;
if let Err(e) = user_id.validate_strict() {
if self.services.config.emergency_password.is_none() {
return Err!("Username {user_id} contains disallowed characters or spaces: {e}");
}
}
if self.services.users.exists(&user_id).await {
return Err!("User {user_id} already exists");
}
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
// Create user
self.services
.users
.create(&user_id, Some(password.as_str()), None)
.await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
// If `new_user_displayname_suffix` is set, registration will push whatever
// content is set to the user's display name with a space before it
if !self
let user_id = self
.services
.server
.config
.new_user_displayname_suffix
.is_empty()
{
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?;
}
.users
.determine_registration_user_id(Some(username), None, None)
.await?;
let password = HashedPassword::new(
&password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)),
)?;
self.services
.users
.set_displayname(&user_id, Some(displayname));
.create_local_account(&user_id, password, None)
.await;
// Initial account data
self.services
.account_data
.update(
None,
&user_id,
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent::new(
ruma::events::push_rules::PushRulesEventContent::new(
ruma::push::Ruleset::server_default(&user_id),
),
))
.unwrap(),
)
.await?;
if !self.services.server.config.auto_join_rooms.is_empty() {
for room in &self.services.server.config.auto_join_rooms {
let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
error!(
%user_id,
"Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"
);
continue;
};
if !self
.services
.rooms
.state_cache
.server_in_room(self.services.globals.server_name(), &room_id)
.await
{
warn!(
"Skipping room {room} to automatically join as we have never joined before."
);
continue;
}
if let Some(room_server_name) = room.server_name() {
match join_room_by_id_helper(
self.services,
&user_id,
&room_id,
Some("Automatically joining this room upon registration".to_owned()),
&[
self.services.globals.server_name().to_owned(),
room_server_name.to_owned(),
],
&None,
)
.await
{
| Ok(_response) => {
info!("Automatically joined room {room} for user {user_id}");
},
| Err(e) => {
// don't return this error so we don't fail registrations
error!(
"Failed to automatically join room {room} for user {user_id}: {e}"
);
self.services
.admin
.send_text(&format!(
"Failed to automatically join room {room} for user {user_id}: \
{e}"
))
.await;
},
}
}
}
}
// We don't add a device since we're not the user, just the creator
// Make the first user to register an administrator and disable first-run mode.
self.services.firstrun.empower_first_user(&user_id).await?;
self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`"))
.await
self.write_str(&format!("Created user {user_id}")).await
}
#[admin_command]
@@ -274,17 +165,13 @@ pub(super) async fn reset_password(
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
match self
.services
self.services
.users
.set_password(&user_id, Some(new_password.as_str()))
.await
{
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) => {
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`")
},
}
.set_password(&user_id, Some(HashedPassword::new(&new_password)?));
self.write_str(&format!(
"Successfully reset the password for user {user_id}: `{new_password}`"
))
.await?;
if logout {
@@ -304,31 +191,6 @@ pub(super) async fn reset_password(
Ok(())
}
#[admin_command]
pub(super) async fn issue_password_reset_link(&self, username: String) -> Result {
use conduwuit_service::password_reset::{PASSWORD_RESET_PATH, RESET_TOKEN_QUERY_PARAM};
self.bail_restricted()?;
let mut reset_url = self
.services
.config
.get_client_domain()
.join(PASSWORD_RESET_PATH)
.unwrap();
let user_id = parse_local_user_id(self.services, &username)?;
let token = self.services.password_reset.issue_token(user_id).await?;
reset_url
.query_pairs_mut()
.append_pair(RESET_TOKEN_QUERY_PARAM, &token.token);
self.write_str(&format!("Password reset link issued for {username}: {reset_url}"))
.await?;
Ok(())
}
#[admin_command]
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
if self.body.len() < 2
@@ -431,6 +293,82 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.await
}
/// Admin command: list every room a local user currently has a pending
/// invite to, newest-membership-count first, including the inviter when
/// it can be determined from the state cache.
///
/// Errors if `user_id` is not a valid local user or has no invites.
#[admin_command]
pub(super) async fn list_invited_rooms(&self, user_id: String) -> Result {
	// Validate user id
	let user_id = parse_local_user_id(self.services, &user_id)?;
	// Pair each invited room's info with the (possibly unresolvable) inviter
	let mut rooms: Vec<((OwnedRoomId, u64, String), Result<OwnedUserId>)> = self
		.services
		.rooms
		.state_cache
		.rooms_invited(&user_id)
		.then(async |(room_id, _)| {
			let sender = self
				.services
				.rooms
				.state_cache
				.invite_sender(&user_id, &room_id)
				.await;
			(get_room_info(self.services, &room_id).await, sender)
		})
		.collect()
		.await;
	if rooms.is_empty() {
		return Err!("User is not invited to any rooms.");
	}
	// Sort by member count, descending
	rooms.sort_by_key(|r| r.0.1);
	rooms.reverse();
	// One tab-separated line per room; inviter column only when known
	let body = rooms
		.iter()
		.map(|((id, members, name), sender)| match sender {
			| Ok(user_id) =>
				format!("{id}\tInviter: {user_id}\tMembers: {members}\tName: {name}"),
			| Err(_) => format!("{id}\tMembers: {members}\tName: {name}"),
		})
		.collect::<Vec<_>>()
		.join("\n");
	self.write_str(&format!("Rooms {user_id} is Invited to ({}):\n```\n{body}\n```", rooms.len()))
		.await
}
/// Admin command: make a local user reject (leave) every room they are
/// currently invited to.
///
/// Attempts to leave each invited room independently; failures are logged
/// and counted, and the command errors if any rejection failed.
#[admin_command]
pub(super) async fn reject_all_invites(&self, user_id: String) -> Result {
	let user_id = parse_local_user_id(self.services, &user_id)?;
	assert!(
		self.services.globals.user_is_local(&user_id),
		"Parsed user_id must be a local user"
	);
	// Count the rooms whose leave attempt failed; successes yield None
	let fails = self
		.services
		.rooms
		.state_cache
		.rooms_invited(&user_id)
		.filter_map(async |(room_id, _)| {
			match leave_room(self.services, &user_id, &room_id, None).await {
				| Err(ref e) => {
					warn!(%user_id, "Failed to leave {room_id}: {e}");
					Some(())
				},
				| Ok(()) => None,
			}
		})
		.count()
		.await;
	if fails > 0 {
		return Err!("{fails} invites could not be rejected");
	}
	self.write_str("Successfully rejected all invites.").await
}
#[admin_command]
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
// Validate user id
@@ -556,15 +494,12 @@ pub(super) async fn force_join_list_of_local_users(
let mut successful_joins: usize = 0;
for user_id in user_ids {
match join_room_by_id_helper(
self.services,
&user_id,
&room_id,
Some(String::from(BULK_JOIN_REASON)),
&servers,
&None,
)
.await
match self
.services
.rooms
.membership
.join_room(&user_id, &room_id, Some(String::from(BULK_JOIN_REASON)), &servers)
.await
{
| Ok(_res) => {
successful_joins = successful_joins.saturating_add(1);
@@ -640,15 +575,12 @@ pub(super) async fn force_join_all_local_users(
.collect::<Vec<_>>()
.await
{
match join_room_by_id_helper(
self.services,
user_id,
&room_id,
Some(String::from(BULK_JOIN_REASON)),
&servers,
&None,
)
.await
match self
.services
.rooms
.membership
.join_room(user_id, &room_id, Some(String::from(BULK_JOIN_REASON)), &servers)
.await
{
| Ok(_res) => {
successful_joins = successful_joins.saturating_add(1);
@@ -685,7 +617,46 @@ pub(super) async fn force_join_room(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, &None).await?;
self.services
.rooms
.membership
.join_room(&user_id, &room_id, None, &servers)
.await?;
self.write_str(&format!("{user_id} has been joined to {room_id}."))
.await
}
#[admin_command]
pub(super) async fn force_join_room_remotely(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
via: String,
) -> Result {
let via = ServerName::parse(&via)?;
let user_id = parse_local_user_id(self.services, &user_id)?;
let (room_id, mut servers) = self
.services
.rooms
.alias
.resolve_with_servers(&room_id, None)
.await?;
if servers.contains(&via) {
servers.retain(|n| *n != via);
}
servers.insert(0, via);
assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
self.services
.rooms
.membership
.join_remote_room(&user_id, &room_id, None, &servers, state_lock)
.await?;
self.write_str(&format!("{user_id} has been joined to {room_id}."))
.await
+25 -6
View File
@@ -29,12 +29,6 @@ pub enum UserCommand {
password: Option<String>,
},
/// Issue a self-service password reset link for a user.
IssuePasswordResetLink {
/// Username of the user who may use the link
username: String,
},
/// Get a user's associated email address.
GetEmail {
user_id: String,
@@ -160,6 +154,17 @@ pub enum UserCommand {
#[clap(alias = "list")]
ListUsers,
/// Lists all the rooms (local and remote) that the specified user is
/// invited to
ListInvitedRooms {
user_id: String,
},
/// Manually make a user reject all current invites
RejectAllInvites {
user_id: String,
},
/// Lists all the rooms (local and remote) that the specified user is
/// joined in
ListJoinedRooms {
@@ -172,6 +177,20 @@ pub enum UserCommand {
room_id: OwnedRoomOrAliasId,
},
/// Manually join a local user to a room via a remote server, regardless of
/// our current residency.
ForceJoinRoomRemotely {
/// The user to join
user_id: String,
/// The room to join
room_id: OwnedRoomOrAliasId,
/// The server name to join via.
///
/// This server will always be tried first, however if more are
/// available, they may be tried after.
via: String,
},
/// Manually leave a local user from a room.
ForceLeaveRoom {
user_id: String,
+1 -1
View File
@@ -48,7 +48,7 @@ pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result<
Ok(user_id)
}
/// Parses user ID that is an active (not guest or deactivated) local user
/// Parses user ID that is an active (not deactivated) local user
pub(crate) async fn parse_active_local_user_id(
services: &Services,
user_id: &str,
+1 -3
View File
@@ -48,9 +48,6 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
ldap = [
"conduwuit-service/ldap"
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
@@ -77,6 +74,7 @@ conduwuit-macros.workspace = true
conduwuit-service.workspace = true
const-str.workspace = true
ctor.workspace = true
dtor.workspace = true
futures.workspace = true
hmac.workspace = true
http.workspace = true
+16 -20
View File
@@ -24,9 +24,9 @@
power_levels::RoomPowerLevelsEventContent,
},
};
use service::{mailer::messages, uiaa::Identity};
use service::{mailer::messages, uiaa::UiaaInitiator, users::HashedPassword};
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::Ruma;
pub(crate) mod register;
@@ -121,7 +121,7 @@ pub(crate) async fn change_password_route(
&body.auth,
vec![AuthFlow::new(vec![AuthType::Password])],
Box::default(),
Some(Identity::from_user_id(user_id)),
Some(UiaaInitiator::new(user_id, body.sender_device())),
)
.await?
} else {
@@ -150,8 +150,7 @@ pub(crate) async fn change_password_route(
services
.users
.set_password(&sender_user, Some(&body.new_password))
.await?;
.set_password(&sender_user, Some(HashedPassword::new(&body.new_password)?));
if body.logout_devices {
// Logout all devices except the current one
@@ -188,7 +187,7 @@ pub(crate) async fn change_password_route(
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!("User {} changed their password.", &sender_user))
.notice(&format!("User {sender_user} changed their password."))
.await;
}
@@ -239,19 +238,11 @@ pub(crate) async fn request_password_change_token_via_email_route(
///
/// Note: Also works for Application Services
pub(crate) async fn whoami_route(
State(services): State<crate::State>,
State(_): State<crate::State>,
body: Ruma<whoami::v3::Request>,
) -> Result<whoami::v3::Response> {
let is_guest = services
.users
.is_deactivated(body.sender_user())
.await
.map_err(|_| {
err!(Request(Forbidden("Application service has not registered this user.")))
})? && body.appservice_info.is_none();
Ok(assign!(whoami::v3::Response::new(body.sender_user().to_owned(), is_guest), {
device_id: body.sender_device.clone(),
Ok(assign!(whoami::v3::Response::new(body.sender_user().to_owned(), false), {
device_id: body.sender_device,
}))
}
@@ -279,10 +270,17 @@ pub(crate) async fn deactivate_route(
.as_ref()
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
if !services.config.allow_deactivation {
return Err!(Request(Unauthorized(
"You may not deactivate your own account. Contact your server's administrator for \
assistance."
)));
}
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(&body.auth, sender_user, body.sender_device(), None)
.await?;
// Remove profile pictures and display name
@@ -331,8 +329,6 @@ pub(crate) async fn check_registration_token_validity(
/// Runs through all the deactivation steps:
///
/// - Mark as deactivated
/// - Removing display name
/// - Removing avatar URL and blurhash
/// - Removing all profile data
/// - Leaving all rooms (and forgets all of them)
pub async fn full_user_deactivate(
+63 -399
View File
@@ -1,40 +1,30 @@
use std::{collections::HashMap, fmt::Write};
use std::collections::HashMap;
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, debug_info, error, info,
Err, Result, debug_info, info,
utils::{self},
warn,
};
use conduwuit_service::Services;
use futures::{FutureExt, StreamExt};
use futures::StreamExt;
use lettre::{Address, message::Mailbox};
use register::RegistrationKind;
use ruma::{
OwnedUserId, UserId,
api::client::{
account::{
register::{self, LoginType},
register::{self, LoginType, RegistrationKind},
request_registration_token_via_email,
},
uiaa::{AuthFlow, AuthType},
},
assign,
events::{
GlobalAccountDataEventType, push_rules::PushRulesEvent,
room::message::RoomMessageEventContent,
},
push,
};
use serde_json::value::RawValue;
use service::mailer::messages;
use service::{mailer::messages, users::HashedPassword};
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::Ruma;
const RANDOM_USER_ID_LENGTH: usize = 10;
/// # `POST /_matrix/client/v3/register`
///
/// Register an account on this homeserver.
@@ -42,16 +32,6 @@
/// You can use [`GET
/// /_matrix/client/v3/register/available`](fn.get_register_available_route.
/// html) to check if the user id is valid and available.
///
/// - Only works if registration is enabled
/// - If type is guest: ignores all parameters except
/// initial_device_display_name
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
/// - If type is not guest and no username is given: Always fails after UIAA
/// check
/// - Creates a new account and populates it with default account data
/// - If `inhibit_login` is false: Creates a device and returns device id and
/// access_token
#[allow(clippy::doc_markdown)]
#[tracing::instrument(skip_all, fields(%client), name = "register", level = "info")]
pub(crate) async fn register_route(
@@ -59,8 +39,9 @@ pub(crate) async fn register_route(
ClientIp(client): ClientIp,
body: Ruma<register::v3::Request>,
) -> Result<register::v3::Response> {
let is_guest = body.kind == RegistrationKind::Guest;
let emergency_mode_enabled = services.config.emergency_password.is_some();
if body.kind != RegistrationKind::User {
return Err!(Request(GuestAccessForbidden("Guests may not register on this server.")));
}
// Allow registration if it's enabled in the config file or if this is the first
// run (so the first user account can be created)
@@ -68,161 +49,65 @@ pub(crate) async fn register_route(
services.config.allow_registration || services.firstrun.is_first_run();
if !allow_registration && body.appservice_info.is_none() {
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
| (Some(username), Some(device_display_name)) => {
info!(
%is_guest,
user = %username,
device_name = %device_display_name,
"Rejecting registration attempt as registration is disabled"
);
},
| (Some(username), _) => {
info!(
%is_guest,
user = %username,
"Rejecting registration attempt as registration is disabled"
);
},
| (_, Some(device_display_name)) => {
info!(
%is_guest,
device_name = %device_display_name,
"Rejecting registration attempt as registration is disabled"
);
},
| (None, _) => {
info!(
%is_guest,
"Rejecting registration attempt as registration is disabled"
);
},
}
return Err!(Request(Forbidden(
"This server is not accepting registrations at this time."
)));
}
if is_guest && !services.config.allow_guest_registration {
info!(
"Guest registration disabled, rejecting guest registration attempt, initial device \
name: \"{}\"",
body.initial_device_display_name.as_deref().unwrap_or("")
?body.username,
?body.initial_device_display_name,
"Rejecting registration attempt as registration is disabled"
);
return Err!(Request(GuestAccessForbidden("Guest registration is disabled.")));
}
// forbid guests from registering if there is not a real admin user yet. give
// generic user error.
if is_guest && services.firstrun.is_first_run() {
warn!(
"Guest account attempted to register before a real admin user has been registered, \
rejecting registration. Guest's initial device name: \"{}\"",
body.initial_device_display_name.as_deref().unwrap_or("")
);
return Err!(Request(Forbidden(
"This server is not accepting registrations at this time."
)));
}
// Appservices and guests get to skip auth
let skip_auth = body.appservice_info.is_some() || is_guest;
let user_id = if body.body.login_type == Some(LoginType::ApplicationService) {
let Some(appservice_info) = &body.appservice_info else {
return Err!(Request(Forbidden(
"Only appservices can use the appservice login type."
)));
};
let identity = if skip_auth {
// Appservices and guests have no identity
None
let user_id = services
.users
.determine_registration_user_id(body.username.clone(), None, Some(appservice_info))
.await?;
services.users.create(&user_id, None).await?;
user_id
} else {
// Perform UIAA to determine the user's identity
let (flows, params) = create_registration_uiaa_session(&services).await?;
Some(
services
.uiaa
.authenticate(&body.auth, flows, params, None)
.await?,
)
let identity = services
.uiaa
.authenticate(&body.auth, flows, params, None)
.await?;
let password = if let Some(password) = &body.password {
HashedPassword::new(password)?
} else {
return Err!(Request(InvalidParam("A password must be provided.")));
};
let user_id = services
.users
.determine_registration_user_id(body.username.clone(), identity.email.as_ref(), None)
.await?;
services
.users
.create_local_account(&user_id, password, identity.email)
.await;
user_id
};
// If the user didn't supply a username but did supply an email, use
// the email's user as their initial localpart to avoid falling back to
// a randomly generated localpart
let supplied_username = body.username.clone().or_else(|| {
if let Some(identity) = &identity
&& let Some(email) = &identity.email
{
Some(email.user().to_owned())
} else {
None
}
});
let user_id = determine_registration_user_id(
&services,
supplied_username,
is_guest,
emergency_mode_enabled,
)
.await?;
if body.body.login_type == Some(LoginType::ApplicationService) {
// For appservice logins, make sure that the user ID is in the appservice's
// namespace
match body.appservice_info {
| Some(ref info) =>
if !info.is_user_match(&user_id) && !emergency_mode_enabled {
return Err!(Request(Exclusive(
"Username is not in an appservice namespace."
)));
},
| _ => {
return Err!(Request(MissingToken("Missing appservice token.")));
},
}
} else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled
{
// For non-appservice logins, ban user IDs which are in an appservice's
// namespace (unless emergency mode is enabled)
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
}
let password = if is_guest { None } else { body.password.as_deref() };
// Create user
services.users.create(&user_id, password, None).await?;
// Set an initial display name
let mut displayname = user_id.localpart().to_owned();
// Apply the new user displayname suffix, if it's set
if !services.globals.new_user_displayname_suffix().is_empty()
&& body.appservice_info.is_none()
{
write!(displayname, " {}", services.server.config.new_user_displayname_suffix)?;
}
services
.users
.set_displayname(&user_id, Some(displayname.clone()));
// Initial account data
services
.account_data
.update(
None,
&user_id,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(PushRulesEvent::new(
push::Ruleset::server_default(&user_id).into(),
))
.expect("should be able to serialize push rules"),
)
.await?;
// Generate new device id if the user didn't specify one
let (token, device) = if !body.inhibit_login {
let device_id = if is_guest { None } else { body.device_id.clone() }
// Generate new device id if the user didn't specify one
let device_id = body
.device_id
.clone()
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
// Generate new token for the device
@@ -235,6 +120,7 @@ pub(crate) async fn register_route(
&user_id,
&device_id,
&new_token,
None,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
@@ -245,151 +131,7 @@ pub(crate) async fn register_route(
(None, None)
};
debug_info!(%user_id, ?device, "User account was created");
// If the user registered with an email, associate it with their account.
if let Some(identity) = identity
&& let Some(email) = identity.email
{
// This may fail if the email is already in use, but we already check for that
// in `/requestToken`, so ignoring the error is acceptable here in the rare case
// that an email is sniped by another user between the `/requestToken` request
// and the `/register` request.
let _ = services
.threepid
.associate_localpart_email(user_id.localpart(), &email)
.await;
}
let device_display_name = body.initial_device_display_name.as_deref().unwrap_or("");
// log in conduit admin channel if a non-guest user registered
if body.appservice_info.is_none() && !is_guest {
if !device_display_name.is_empty() {
let notice = format!(
"New user \"{user_id}\" registered on this server from IP {client} and device \
display name \"{device_display_name}\""
);
info!("{notice}");
if services.server.config.admin_room_notices {
services.admin.notice(&notice).await;
}
} else {
let notice = format!("New user \"{user_id}\" registered on this server.");
info!("{notice}");
if services.server.config.admin_room_notices {
services.admin.notice(&notice).await;
}
}
}
// log in conduit admin channel if a guest registered
if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations {
debug_info!("New guest user \"{user_id}\" registered on this server.");
if !device_display_name.is_empty() {
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Guest user \"{user_id}\" with device display name \
\"{device_display_name}\" registered on this server from IP {client}"
))
.await;
}
} else {
#[allow(clippy::collapsible_else_if)]
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Guest user \"{user_id}\" with no device display name registered on \
this server from IP {client}",
))
.await;
}
}
}
if !is_guest {
// Make the first user to register an administrator and disable first-run mode.
let was_first_user = services.firstrun.empower_first_user(&user_id).await?;
// If the registering user was not the first and we're suspending users on
// register, suspend them.
if !was_first_user && services.config.suspend_on_register {
// Note that we can still do auto joins for suspended users
services
.users
.suspend_account(&user_id, &services.globals.server_user)
.await;
// And send an @room notice to the admin room, to prompt admins to review the
// new user and ideally unsuspend them if deemed appropriate.
if services.server.config.admin_room_notices {
services
.admin
.send_loud_message(RoomMessageEventContent::text_plain(format!(
"User {user_id} has been suspended as they are not the first user on \
this server. Please review and unsuspend them if appropriate."
)))
.await
.ok();
}
}
}
if body.appservice_info.is_none()
&& !services.server.config.auto_join_rooms.is_empty()
&& (services.config.allow_guests_auto_join_rooms || !is_guest)
{
for room in &services.server.config.auto_join_rooms {
let Ok(room_id) = services.rooms.alias.resolve(room).await else {
error!(
"Failed to resolve room alias to room ID when attempting to auto join \
{room}, skipping"
);
continue;
};
if !services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), &room_id)
.await
{
warn!(
"Skipping room {room} to automatically join as we have never joined before."
);
continue;
}
if let Some(room_server_name) = room.server_name() {
match join_room_by_id_helper(
&services,
&user_id,
&room_id,
Some("Automatically joining this room upon registration".to_owned()),
&[services.globals.server_name().to_owned(), room_server_name.to_owned()],
&body.appservice_info,
)
.boxed()
.await
{
| Err(e) => {
// don't return this error so we don't fail registrations
error!(
"Failed to automatically join room {room} for user {user_id}: {e}"
);
},
| _ => {
info!("Automatically joined room {room} for user {user_id}");
},
}
}
}
}
debug_info!(%user_id, ?device, "New account created via legacy registration");
Ok(assign!(register::v3::Response::new(user_id), {
access_token: token,
@@ -461,21 +203,21 @@ async fn create_registration_uiaa_session(
// Require all users to agree to the terms and conditions, if configured
let terms = &services.config.registration_terms;
if !terms.is_empty() {
let mut terms =
serde_json::to_value(terms.clone()).expect("failed to serialize terms");
if !terms.documents.is_empty() {
let mut terms_map = HashMap::new();
// Insert a dummy `version` field
for (_, documents) in terms.as_object_mut().unwrap() {
let documents = documents.as_object_mut().unwrap();
documents.insert("version".to_owned(), "latest".into());
for (id, document) in &terms.documents {
terms_map.insert(id.to_owned(), serde_json::json!({
terms.language.clone(): serde_json::to_value(document).expect("should be able to serialize document")
}));
}
terms_map.insert("version".to_owned(), "latest".into());
params.insert(
AuthType::Terms.as_str().to_owned(),
serde_json::json!({
"policies": terms,
"policies": terms_map,
}),
);
@@ -508,84 +250,6 @@ async fn create_registration_uiaa_session(
Ok((flows, params))
}
async fn determine_registration_user_id(
services: &Services,
supplied_username: Option<String>,
is_guest: bool,
emergency_mode_enabled: bool,
) -> Result<OwnedUserId> {
if let Some(supplied_username) = supplied_username
&& !is_guest
{
// The user gets to pick their username. Do some validation to make sure it's
// acceptable.
// Don't allow registration with forbidden usernames.
if services
.globals
.forbidden_usernames()
.is_match(&supplied_username)
&& !emergency_mode_enabled
{
return Err!(Request(Forbidden("Username is forbidden")));
}
// Create and validate the user ID
let user_id = match UserId::parse_with_server_name(
&supplied_username,
services.globals.server_name(),
) {
| Ok(user_id) => {
if let Err(e) = user_id.validate_strict() {
// Unless we are in emergency mode, we should follow synapse's behaviour on
// not allowing things like spaces and UTF-8 characters in usernames
if !emergency_mode_enabled {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {supplied_username} contains disallowed characters or \
spaces: {e}"
))));
}
}
// Don't allow registration with user IDs that aren't local
if !services.globals.user_is_local(&user_id) {
return Err!(Request(InvalidUsername(
"Username {supplied_username} is not local to this server"
)));
}
user_id
},
| Err(e) => {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {supplied_username} is not valid: {e}"
))));
},
};
if services.users.exists(&user_id).await {
return Err!(Request(UserInUse("User ID is not available.")));
}
Ok(user_id)
} else {
// The user is a guest or didn't specify a username. Generate a username for
// them.
loop {
let user_id = UserId::parse_with_server_name(
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
services.globals.server_name(),
)
.unwrap();
if !services.users.exists(&user_id).await {
break Ok(user_id);
}
}
}
}
/// # `POST /_matrix/client/v3/register/email/requestToken`
///
/// Requests a validation email for the purpose of registering a new account.
+5 -4
View File
@@ -11,7 +11,7 @@
},
thirdparty::{Medium, ThirdPartyIdentifierInit},
};
use service::{mailer::messages, uiaa::Identity};
use service::mailer::messages;
use crate::Ruma;
@@ -116,14 +116,15 @@ pub(crate) async fn add_3pid_route(
// Require password auth to add an email
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(&body.auth, sender_user, body.sender_device(), None)
.await?;
let email = services
.threepid
.consume_valid_session(&body.sid, &body.client_secret)
.get_valid_session(&body.sid, &body.client_secret)
.await
.map_err(|message| err!(Request(ThreepidAuthFailed("{message}"))))?;
.map_err(|message| err!(Request(ThreepidAuthFailed("{message}"))))?
.consume();
services
.threepid
+3 -3
View File
@@ -8,7 +8,6 @@
self, delete_device, delete_devices, get_device, get_devices, update_device,
},
};
use service::uiaa::Identity;
use crate::{Ruma, client::DEVICE_ID_LENGTH};
@@ -95,6 +94,7 @@ pub(crate) async fn update_device_route(
&device_id,
&appservice.registration.as_token,
None,
None,
Some(client.to_string()),
)
.await?;
@@ -126,7 +126,7 @@ pub(crate) async fn delete_device_route(
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(&body.auth, sender_user, body.sender_device(), None)
.await?;
}
@@ -162,7 +162,7 @@ pub(crate) async fn delete_devices_route(
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(&body.auth, sender_user, body.sender_device(), None)
.await?;
}
-10
View File
@@ -122,16 +122,6 @@ pub(crate) async fn set_room_visibility_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
if services
.users
.is_deactivated(sender_user)
.await
.unwrap_or(false)
&& body.appservice_info.is_none()
{
return Err!(Request(Forbidden("Guests cannot publish to room directories")));
}
if !user_can_publish_room(&services, sender_user, &body.room_id).await? {
return Err!(Request(Forbidden("User is not allowed to publish this room")));
}
+28 -2
View File
@@ -26,7 +26,7 @@
serde::Raw,
};
use serde_json::json;
use service::uiaa::Identity;
use service::oauth::OAuthTicket;
use crate::Ruma;
@@ -64,6 +64,27 @@ pub(crate) async fn upload_keys_route(
.await?;
}
for (key_id, fallback_key) in &body.fallback_keys {
if fallback_key
.deserialize()
.inspect_err(|e| {
debug_warn!(
%key_id,
?fallback_key,
"Invalid one time key JSON submitted by client, skipping: {e}"
);
})
.is_err()
{
continue;
}
services
.users
.add_fallback_key(sender_user, sender_device, key_id, fallback_key, false)
.await?;
}
if let Some(device_keys) = &body.device_keys {
let deser_device_keys = device_keys.deserialize().map_err(|e| {
err!(Request(BadJson(debug_warn!(
@@ -183,7 +204,12 @@ pub(crate) async fn upload_signing_keys_route(
{
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(
&body.auth,
sender_user,
body.sender_device(),
Some(OAuthTicket::CrossSigningReset),
)
.await?;
}
+1 -12
View File
@@ -21,7 +21,6 @@
},
media::create_content,
},
assign,
};
use service::media::mxc::Mxc;
@@ -76,17 +75,7 @@ pub(crate) async fn create_content_route(
return Err!(Request(Unknown("Failed to save uploaded media")));
}
let blurhash = body.generate_blurhash.then(|| {
services
.media
.create_blurhash(&body.file, content_type, filename)
.ok()
.flatten()
});
Ok(assign!(create_content::v3::Response::new(mxc.to_string().into()), {
blurhash: blurhash.flatten(),
}))
Ok(create_content::v3::Response::new(mxc.to_string().into()))
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
-1
View File
@@ -247,7 +247,6 @@ pub(crate) async fn invite_helper(
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
content.displayname = services.users.displayname(recipient_user).await.ok();
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
content.blurhash = services.users.blurhash(recipient_user).await.ok();
content.is_direct = Some(is_direct);
content.reason = reason;
+19 -698
View File
@@ -1,58 +1,18 @@
use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
use axum::extract::State;
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, debug, debug_info, debug_warn, err, error, info, is_true,
matrix::{
StateKey,
event::{gen_event_id, gen_event_id_canonical_json},
pdu::{PartialPdu, PduEvent},
state_res,
},
Err, Result, debug,
result::FlatOk,
trace,
utils::{
self, shuffle,
stream::{IterStream, ReadyExt},
to_canonical_object,
},
warn,
utils::{shuffle, stream::IterStream},
};
use futures::{FutureExt, StreamExt, TryFutureExt};
use futures::{FutureExt, StreamExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId,
api::{
client::membership::{join_room_by_id, join_room_by_id_or_alias},
error::{ErrorKind, IncompatibleRoomVersionErrorData},
federation::{self},
},
canonical_json::to_canonical_value,
events::{
StateEventType,
room::{
join_rules::JoinRule,
member::{MembershipState, RoomMemberEventContent},
},
},
OwnedRoomId, OwnedServerName, OwnedUserId, UserId,
api::client::membership::{join_room_by_id, join_room_by_id_or_alias},
};
use service::{
Services,
appservice::RegistrationInfo,
rooms::{
state::RoomMutexGuard,
state_compressor::{CompressedState, HashSetCompressStateEvent},
timeline::pdu_fits,
},
};
use tokio::join;
use super::{banned_room_check, validate_remote_member_event_stub};
use crate::{
Ruma,
server::{select_authorising_user, user_can_perform_restricted_join},
};
use super::banned_room_check;
use crate::Ruma;
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
///
@@ -112,16 +72,14 @@ pub(crate) async fn join_room_by_id_route(
shuffle(&mut servers);
let servers = deprioritize(servers, &services.config.deprioritize_joins_through_servers);
join_room_by_id_helper(
&services,
sender_user,
&body.room_id,
body.reason.clone(),
&servers,
&body.appservice_info,
)
.boxed()
.await
let room_id = services
.rooms
.membership
.join_room(sender_user, &body.room_id, body.reason.clone(), &servers)
.boxed()
.await?;
Ok(join_room_by_id::v3::Response::new(room_id))
}
/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}`
@@ -140,7 +98,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
body: Ruma<join_room_by_id_or_alias::v3::Request>,
) -> Result<join_room_by_id_or_alias::v3::Response> {
let sender_user = body.sender_user();
let appservice_info = &body.appservice_info;
let body = &body.body;
if services.users.is_suspended(sender_user).await? {
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
@@ -235,650 +192,14 @@ pub(crate) async fn join_room_by_id_or_alias_route(
};
let servers = deprioritize(servers, &services.config.deprioritize_joins_through_servers);
let join_room_response = join_room_by_id_helper(
&services,
sender_user,
&room_id,
body.reason.clone(),
&servers,
appservice_info,
)
.boxed()
.await?;
Ok(join_room_by_id_or_alias::v3::Response::new(join_room_response.room_id))
}
pub async fn join_room_by_id_helper(
services: &Services,
sender_user: &UserId,
room_id: &RoomId,
reason: Option<String>,
servers: &[OwnedServerName],
appservice_info: &Option<RegistrationInfo>,
) -> Result<join_room_by_id::v3::Response> {
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let user_is_guest = services
.users
.is_deactivated(sender_user)
.await
.unwrap_or(false)
&& appservice_info.is_none();
if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await {
return Err!(Request(Forbidden("Guests are not allowed to join this room")));
}
if services
let room_id = services
.rooms
.state_cache
.is_joined(sender_user, room_id)
.await
{
debug_warn!("{sender_user} is already joined in {room_id}");
return Ok(join_room_by_id::v3::Response::new(room_id.to_owned()));
}
if let Err(e) = services
.antispam
.user_may_join_room(
sender_user.to_owned(),
room_id.to_owned(),
services
.rooms
.state_cache
.is_invited(sender_user, room_id)
.await,
)
.await
{
warn!("Antispam prevented user {} from joining room {}: {}", sender_user, room_id, e);
return Err!(Request(Forbidden("You are not allowed to join this room.")));
}
let server_in_room = services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room_id)
.await;
// Only check our known membership if we're already in the room.
// See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855
let membership = if server_in_room {
services
.rooms
.state_accessor
.get_member(room_id, sender_user)
.await
} else {
debug!("Ignoring local state for join {room_id}, we aren't in the room yet.");
Ok(RoomMemberEventContent::new(MembershipState::Leave))
};
if let Ok(m) = membership {
if m.membership == MembershipState::Ban {
debug_warn!("{sender_user} is banned from {room_id} but attempted to join");
// TODO: return reason
return Err!(Request(Forbidden("You are banned from the room.")));
}
}
if !server_in_room && servers.is_empty() {
return Err!(Request(NotFound(
"No servers were provided to assist in joining the room remotely, and we are not \
already participating in the room."
)));
}
if services.antispam.check_all_joins() {
if let Err(e) = services
.antispam
.meowlnir_accept_make_join(room_id.to_owned(), sender_user.to_owned())
.await
{
warn!("Antispam prevented user {} from joining room {}: {}", sender_user, room_id, e);
return Err!(Request(Forbidden("Antispam rejected join request.")));
}
}
if server_in_room {
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
.boxed()
.await?;
} else {
// Ask a remote server if we are not participating in this room
join_room_by_id_helper_remote(
services,
sender_user,
room_id,
reason,
servers,
state_lock,
)
.membership
.join_room(sender_user, &room_id, body.reason.clone(), &servers)
.boxed()
.await?;
}
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote", level = "info")]
async fn join_room_by_id_helper_remote(
services: &Services,
sender_user: &UserId,
room_id: &RoomId,
reason: Option<String>,
servers: &[OwnedServerName],
state_lock: RoomMutexGuard,
) -> Result {
info!("Joining {room_id} over federation.");
let (make_join_response, remote_server) =
make_join_request(services, sender_user, room_id, servers).await?;
info!("make_join finished");
let room_version = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
let room_version_rules = room_version
.rules()
.expect("room version should have defined rules");
if !services.server.supported_room_version(&room_version) {
// How did we get here?
return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
}
let mut join_event_stub: CanonicalJsonObject =
serde_json::from_str(make_join_response.event.get()).map_err(|e| {
err!(BadServerResponse(warn!(
"Invalid make_join event json received from server: {e:?}"
)))
})?;
let join_authorized_via_users_server = {
use RoomVersionId::*;
if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
join_event_stub
.get("content")
.map(|s| {
s.as_object()?
.get("join_authorised_via_users_server")?
.as_str()
})
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok())
} else {
None
}
};
join_event_stub.insert(
"origin_server_ts".to_owned(),
CanonicalJsonValue::Integer(
utils::millis_since_unix_epoch()
.try_into()
.expect("Timestamp is valid js_int value"),
),
);
let mut join_content = RoomMemberEventContent::new(MembershipState::Join);
join_content.displayname = services.users.displayname(sender_user).await.ok();
join_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
join_content.blurhash = services.users.blurhash(sender_user).await.ok();
join_content.reason = reason;
join_content
.join_authorized_via_users_server
.clone_from(&join_authorized_via_users_server);
join_event_stub.insert(
"content".to_owned(),
to_canonical_value(join_content).expect("event is valid, we just created it"),
);
// Remove event id if it exists
join_event_stub.remove("event_id");
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
services
.server_keys
.hash_and_sign_event(&mut join_event_stub, &room_version_rules)?;
// Generate event id
let event_id = gen_event_id(&join_event_stub, &room_version_rules)?;
// Add event_id back
join_event_stub
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
// It has enough fields to be called a proper event now
let mut join_event = join_event_stub;
info!("Asking {remote_server} for send_join in room {room_id}");
let send_join_request = federation::membership::create_join_event::v2::Request::new(
room_id.to_owned(),
event_id.clone(),
services
.sending
.convert_to_outgoing_federation_event(join_event.clone())
.await,
);
let send_join_response = match services
.sending
.send_synapse_request(&remote_server, send_join_request)
.await
{
| Ok(response) => response,
| Err(e) => {
error!("send_join failed: {e}");
return Err(e);
},
};
info!("send_join finished");
if join_authorized_via_users_server.is_some() {
if let Some(signed_raw) = &send_join_response.room_state.event {
debug_info!(
"There is a signed event with join_authorized_via_users_server. This room is \
probably using restricted joins. Adding signature to our event"
);
let (signed_event_id, signed_value) =
gen_event_id_canonical_json(signed_raw, &room_version_rules).map_err(|e| {
err!(Request(BadJson(warn!(
"Could not convert event to canonical JSON: {e}"
))))
})?;
if signed_event_id != event_id {
return Err!(Request(BadJson(warn!(
%signed_event_id, %event_id,
"Server {remote_server} sent event with wrong event ID"
))));
}
match signed_value["signatures"]
.as_object()
.ok_or_else(|| {
err!(BadServerResponse(warn!(
"Server {remote_server} sent invalid signatures type"
)))
})
.and_then(|e| {
e.get(remote_server.as_str()).ok_or_else(|| {
err!(BadServerResponse(warn!(
"Server {remote_server} did not send its signature for a restricted \
room"
)))
})
}) {
| Ok(signature) => {
join_event
.get_mut("signatures")
.expect("we created a valid pdu")
.as_object_mut()
.expect("we created a valid pdu")
.insert(remote_server.to_string(), signature.clone());
},
| Err(e) => {
warn!(
"Server {remote_server} sent invalid signature in send_join signatures \
for event {signed_value:?}: {e:?}",
);
},
}
}
}
services
.rooms
.short
.get_or_create_shortroomid(room_id)
.await;
info!("Parsing join event");
let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone())
.map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?;
info!("Acquiring server signing keys for response events");
let resp_events = &send_join_response.room_state;
let resp_state = &resp_events.state;
let resp_auth = &resp_events.auth_chain;
services
.server_keys
.acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter()))
.await;
info!("Going through send_join response room_state");
let cork = services.db.cork_and_flush();
let state = send_join_response
.room_state
.state
.iter()
.stream()
.then(|pdu| {
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
.inspect_err(|e| {
debug_warn!("Could not validate send_join response room_state event: {e:?}");
})
.inspect(|_| debug!("Completed validating send_join response room_state event"))
})
.ready_filter_map(Result::ok)
.fold(HashMap::new(), |mut state, (event_id, value)| async move {
let pdu = match PduEvent::from_id_val(&event_id, value.clone()) {
| Ok(pdu) => pdu,
| Err(e) => {
debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}");
return state;
},
};
if !pdu_fits(&mut value.clone()) {
warn!(
"dropping incoming PDU {event_id} in room {room_id} from room join because \
it exceeds 65535 bytes or is otherwise too large."
);
return state;
}
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key {
let shortstatekey = services
.rooms
.short
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
.await;
state.insert(shortstatekey, pdu.event_id.clone());
}
state
})
.await;
drop(cork);
info!("Going through send_join response auth_chain");
let cork = services.db.cork_and_flush();
send_join_response
.room_state
.auth_chain
.iter()
.stream()
.then(|pdu| {
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
})
.ready_filter_map(Result::ok)
.ready_for_each(|(event_id, value)| {
trace!(%event_id, "Adding PDU as an outlier from send_join auth_chain");
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
})
.await;
drop(cork);
debug!("Running send_join auth check");
let fetch_state = &state;
let state_fetch = |k: StateEventType, s: StateKey| async move {
let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?;
let event_id = fetch_state.get(&shortstatekey)?;
services.rooms.timeline.get_pdu(event_id).await.ok()
};
let auth_check = state_res::event_auth::auth_check(
&room_version.rules().unwrap(),
&parsed_join_pdu,
None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()),
&state_fetch(StateEventType::RoomCreate, "".into())
.await
.expect("create event is missing from send_join auth"),
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
if !auth_check {
return Err!(Request(Forbidden("Auth check failed")));
}
info!("Compressing state from send_join");
let compressed: CompressedState = services
.rooms
.state_compressor
.compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow())))
.collect()
.await;
debug!("Saving compressed state");
let HashSetCompressStateEvent {
shortstatehash: statehash_before_join,
added,
removed,
} = services
.rooms
.state_compressor
.save_state(room_id, Arc::new(compressed))
.await?;
debug!("Forcing state for new room");
services
.rooms
.state
.force_state(room_id, statehash_before_join, added, removed, &state_lock)
.await?;
debug!("Updating joined counts for new room");
services
.rooms
.state_cache
.update_joined_count(room_id)
.await;
// We append to state before appending the pdu, so we don't have a moment in
// time with the pdu without it's state. This is okay because append_pdu can't
// fail.
let statehash_after_join = services
.rooms
.state
.append_to_state(&parsed_join_pdu, room_id)
.await?;
info!("Appending new room join event");
services
.rooms
.timeline
.append_pdu(
&parsed_join_pdu,
join_event,
once(parsed_join_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;
info!("Setting final room state for new room");
// We set the room state after inserting the pdu, so that we never have a moment
// in time where events in the current room state do not exist
services
.rooms
.state
.set_room_state(room_id, statehash_after_join, &state_lock);
Ok(())
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local", level = "info")]
async fn join_room_by_id_helper_local(
services: &Services,
sender_user: &UserId,
room_id: &RoomId,
reason: Option<String>,
servers: &[OwnedServerName],
state_lock: RoomMutexGuard,
) -> Result {
info!("Joining room locally");
let (room_version, join_rules, is_invited) = join!(
services.rooms.state.get_room_version(room_id),
services.rooms.state_accessor.get_join_rules(room_id),
services.rooms.state_cache.is_invited(sender_user, room_id)
);
let room_version = room_version?;
let mut auth_user: Option<OwnedUserId> = None;
if !is_invited && matches!(join_rules, JoinRule::Restricted(_) | JoinRule::KnockRestricted(_))
{
use RoomVersionId::*;
if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
// This is a restricted room, check if we can complete the join requirements
// locally.
let needs_auth_user =
user_can_perform_restricted_join(services, sender_user, room_id).await;
if needs_auth_user.is_ok_and(is_true!()) {
// If there was an error or the value is false, we'll try joining over
// federation. Since it's Ok(true), we can authorise this locally.
// If we can't select a local user, this will remain None, the join will fail,
// and we'll fall back to federation.
auth_user = select_authorising_user(services, room_id, sender_user, &state_lock)
.await
.ok();
}
}
}
let mut content = RoomMemberEventContent::new(MembershipState::Join);
content.displayname = services.users.displayname(sender_user).await.ok();
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
content.blurhash = services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason);
content.join_authorized_via_users_server = auth_user;
// Try normal join first
let Err(error) = services
.rooms
.timeline
.build_and_append_pdu(
PartialPdu::state(sender_user.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
)
.await
else {
info!("Joined room locally");
return Ok(());
};
if servers.is_empty() || servers.len() == 1 && services.globals.server_is_ours(&servers[0]) {
if !services.rooms.metadata.exists(room_id).await {
return Err!(Request(
Unknown(
"Room was not found locally and no servers were found to help us discover it"
),
NOT_FOUND
));
}
return Err(error);
}
info!(
?error,
remote_servers = %servers.len(),
"Could not join room locally, attempting remote join",
);
join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, state_lock)
.await
}
async fn make_join_request(
services: &Services,
sender_user: &UserId,
room_id: &RoomId,
servers: &[OwnedServerName],
) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> {
let mut make_join_counter: usize = 1;
for remote_server in servers {
if services.globals.server_is_ours(remote_server) {
continue;
}
info!(
"Asking {remote_server} for make_join (attempt {make_join_counter}/{})",
servers.len()
);
let mut request = federation::membership::prepare_join_event::v1::Request::new(
room_id.to_owned(),
sender_user.to_owned(),
);
request.ver = services.server.supported_room_versions().collect();
let make_join_response = services
.sending
.send_federation_request(remote_server, request)
.await;
trace!("make_join response: {:?}", make_join_response);
make_join_counter = make_join_counter.saturating_add(1);
match make_join_response {
| Ok(response) => {
info!("Received make_join response from {remote_server}");
if let Err(e) = validate_remote_member_event_stub(
&MembershipState::Join,
sender_user,
room_id,
&to_canonical_object(&response.event)?,
) {
warn!("make_join response from {remote_server} failed validation: {e}");
continue;
}
return Ok((response, remote_server.clone()));
},
| Err(e) => match e.kind() {
| ErrorKind::UnableToAuthorizeJoin => {
info!(
"{remote_server} was unable to verify the joining user satisfied \
restricted join requirements: {e}. Will continue trying."
);
},
| ErrorKind::UnableToGrantJoin => {
info!(
"{remote_server} believes the joining user satisfies restricted join \
rules, but is unable to authorise a join for us. Will continue trying."
);
},
| ErrorKind::IncompatibleRoomVersion(IncompatibleRoomVersionErrorData {
room_version,
..
}) => {
warn!(
"{remote_server} reports the room we are trying to join is \
v{room_version}, which we do not support."
);
return Err(e);
},
| ErrorKind::Forbidden => {
warn!("{remote_server} refuses to let us join: {e}.");
return Err(e);
},
| ErrorKind::NotFound => {
info!(
"{remote_server} does not know about {room_id}: {e}. Will continue \
trying."
);
},
| _ => {
info!("{remote_server} failed to make_join: {e}. Will continue trying.");
},
},
}
}
info!("All {} servers were unable to assist in joining {room_id} :(", servers.len());
Err!(BadServerResponse("No server available to assist in joining."))
Ok(join_room_by_id_or_alias::v3::Response::new(room_id))
}
/// Moves deprioritized servers (if any) to the back of the list.
+7 -12
View File
@@ -33,12 +33,13 @@
use service::{
Services,
rooms::{
membership::validate_remote_member_event_stub,
state::RoomMutexGuard,
state_compressor::{CompressedState, HashSetCompressStateEvent},
},
};
use super::{banned_room_check, join::join_room_by_id_helper, validate_remote_member_event_stub};
use super::banned_room_check;
use crate::Ruma;
/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}`
@@ -238,15 +239,11 @@ async fn knock_room_by_id_helper(
// join_room_by_id_helper We need to release the lock here and let
// join_room_by_id_helper acquire it again
drop(state_lock);
match join_room_by_id_helper(
services,
sender_user,
room_id,
reason.clone(),
servers,
&None,
)
.await
match services
.rooms
.membership
.join_room(sender_user, room_id, reason.clone(), servers)
.await
{
| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
| Err(e) => {
@@ -346,7 +343,6 @@ async fn knock_room_helper_local(
let mut content = RoomMemberEventContent::new(MembershipState::Knock);
content.displayname = services.users.displayname(sender_user).await.ok();
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
content.blurhash = services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason.clone());
// Try normal knock first
@@ -530,7 +526,6 @@ async fn knock_room_helper_remote(
let mut knock_content = RoomMemberEventContent::new(MembershipState::Knock);
knock_content.displayname = services.users.displayname(sender_user).await.ok();
knock_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
knock_content.blurhash = services.users.blurhash(sender_user).await.ok();
knock_content.reason = reason;
knock_event_stub.insert(
+1 -2
View File
@@ -19,9 +19,8 @@
room::member::{MembershipState, RoomMemberEventContent},
},
};
use service::Services;
use service::{Services, rooms::membership::validate_remote_member_event_stub};
use super::validate_remote_member_event_stub;
use crate::Ruma;
/// # `POST /_matrix/client/v3/rooms/{roomId}/leave`
+2 -89
View File
@@ -13,16 +13,10 @@
use axum::extract::State;
use conduwuit::{Err, Result, warn};
use futures::{FutureExt, StreamExt};
use ruma::{
CanonicalJsonObject, OwnedRoomId, RoomId, ServerName, UserId,
api::client::membership::joined_rooms,
events::{
StaticEventContent,
room::member::{MembershipState, RoomMemberEventContent},
},
};
use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms};
use service::Services;
pub use self::leave::{leave_all_rooms, leave_room, remote_leave_room};
pub(crate) use self::{
ban::ban_user_route,
forget::forget_room_route,
@@ -34,10 +28,6 @@
members::{get_member_events_route, joined_members_route},
unban::unban_user_route,
};
pub use self::{
join::join_room_by_id_helper,
leave::{leave_all_rooms, leave_room, remote_leave_room},
};
use crate::{Ruma, client::full_user_deactivate};
/// # `POST /_matrix/client/r0/joined_rooms`
@@ -159,80 +149,3 @@ pub(crate) async fn banned_room_check(
Ok(())
}
/// Validates that an event returned from a remote server by `/make_*`
/// actually is a membership event with the expected fields.
///
/// Without checking this, the remote server could use the remote membership
/// mechanism to trick our server into signing arbitrary malicious events.
pub(crate) fn validate_remote_member_event_stub(
membership: &MembershipState,
user_id: &UserId,
room_id: &RoomId,
event_stub: &CanonicalJsonObject,
) -> Result<()> {
let Some(event_type) = event_stub.get("type") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing type field"
));
};
if event_type != &RoomMemberEventContent::TYPE {
return Err!(BadServerResponse(
"Remote server returned member event with invalid event type"
));
}
let Some(sender) = event_stub.get("sender") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing sender field"
));
};
if sender != &user_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect sender"
));
}
let Some(state_key) = event_stub.get("state_key") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing state_key field"
));
};
if state_key != &user_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect state_key"
));
}
let Some(event_room_id) = event_stub.get("room_id") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing room_id field"
));
};
if event_room_id != &room_id.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect room_id"
));
}
let Some(content) = event_stub
.get("content")
.and_then(|content| content.as_object())
else {
return Err!(BadServerResponse(
"Remote server returned member event with missing content field"
));
};
let Some(event_membership) = content.get("membership") else {
return Err!(BadServerResponse(
"Remote server returned member event with missing membership field"
));
};
if event_membership != &membership.as_str() {
return Err!(BadServerResponse(
"Remote server returned member event with incorrect membership type"
));
}
Ok(())
}
+4 -1
View File
@@ -16,6 +16,7 @@
pub(super) mod membership;
pub(super) mod message;
pub(super) mod mutual_rooms;
pub(super) mod oauth;
pub(super) mod openid;
pub(super) mod presence;
pub(super) mod profile;
@@ -58,9 +59,10 @@
pub(super) use media::*;
pub(super) use media_legacy::*;
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
pub use membership::{leave_all_rooms, leave_room, remote_leave_room};
pub(super) use message::*;
pub(super) use mutual_rooms::*;
pub(super) use oauth::*;
pub(super) use openid::*;
pub(super) use presence::*;
pub(super) use profile::*;
@@ -73,6 +75,7 @@
pub(super) use room::*;
pub(super) use search::*;
pub(super) use send::*;
pub use session::handle_login;
pub(super) use session::*;
pub(super) use space::*;
pub(super) use state::*;
-4
View File
@@ -21,10 +21,6 @@ pub(crate) async fn get_mutual_rooms_route(
return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
}
if !services.users.exists(&body.user_id).await {
return Ok(mutual_rooms::unstable::Response::new(vec![]));
}
let mutual_rooms = services
.rooms
.state_cache
+56
View File
@@ -0,0 +1,56 @@
use axum::{
Json, Router,
extract::{Request, State},
middleware::{self, Next},
response::{IntoResponse, Response},
routing::method_routing::{get, post},
};
use const_str::concat;
use http::StatusCode;
use serde_json::json;
pub(crate) use server_metadata::*;
mod register_client;
mod server_metadata;
mod token;
const BASE_PATH: &str = concat!(conduwuit_core::ROUTE_PREFIX, "/oauth2/");
const AUTH_CODE_PATH: &str = "grant/authorization_code";
const JWKS_URI_PATH: &str = "client/keys.json";
const CLIENT_REGISTER_PATH: &str = "client/register";
const TOKEN_REVOKE_PATH: &str = "client/revoke";
const TOKEN_PATH: &str = "grant/token";
const ACCOUNT_MANAGEMENT_PATH: &str = concat!(conduwuit_core::ROUTE_PREFIX, "/account/deeplink");
pub(crate) fn router(state: crate::State) -> Router<crate::State> {
Router::new()
.nest(BASE_PATH, oauth_router())
.route(
"/.well-known/openid-configuration",
get(
// TODO(unspecced): used by old versions of the matrix-js-sdk
async |State(services): State<crate::State>| {
Json(authorization_server_metadata(&services).await)
},
),
)
.layer(middleware::from_fn_with_state(
state,
async |State(state): State<crate::State>, request: Request, next: Next| -> Response {
if state.config.oauth.compatibility_mode.oauth_available() {
next.run(request).await
} else {
(StatusCode::NOT_FOUND, "OAuth is unavailable on this server").into_response()
}
},
))
}
fn oauth_router() -> Router<crate::State> {
Router::new()
.route(concat!("/", CLIENT_REGISTER_PATH), post(register_client::register_client_route))
// TODO(unspecced): used by old versions of the matrix-js-sdk
.route(concat!("/", JWKS_URI_PATH), get(async || Json(json!({"keys": []}))))
.route(concat!("/", TOKEN_PATH), post(token::token_route))
.route(concat!("/", TOKEN_REVOKE_PATH), post(token::revoke_token_route))
}
+28
View File
@@ -0,0 +1,28 @@
use axum::{
Json,
extract::State,
response::{IntoResponse, Response},
};
use http::StatusCode;
use serde::Serialize;
use service::oauth::client_metadata::ClientMetadata;
#[derive(Serialize)]
struct RegisteredClient {
client_id: String,
#[serde(flatten)]
metadata: ClientMetadata,
}
pub(crate) async fn register_client_route(
State(services): State<crate::State>,
Json(metadata): Json<ClientMetadata>,
) -> Result<Response, Response> {
let client_id = services
.oauth
.register_client(&metadata)
.await
.map_err(|err| (StatusCode::BAD_REQUEST, err.to_owned()).into_response())?;
Ok(Json(RegisteredClient { client_id, metadata }).into_response())
}
+62
View File
@@ -0,0 +1,62 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use ruma::{
api::client::discovery::get_authorization_server_metadata::{
self, v1::AccountManagementAction,
},
serde::Raw,
};
use serde_json::{Value, json};
use service::Services;
use crate::{
Ruma,
client::oauth::{
ACCOUNT_MANAGEMENT_PATH, AUTH_CODE_PATH, CLIENT_REGISTER_PATH, JWKS_URI_PATH, TOKEN_PATH,
TOKEN_REVOKE_PATH,
},
};
pub(crate) async fn get_authorization_server_metadata_route(
State(services): State<crate::State>,
_body: Ruma<get_authorization_server_metadata::v1::Request>,
) -> Result<get_authorization_server_metadata::v1::Response> {
if !services.config.oauth.compatibility_mode.oauth_available() {
return Err!(Request(Unrecognized("OAuth is unavailable on this server")));
}
let metadata = Raw::new(&authorization_server_metadata(&services).await).unwrap();
Ok(get_authorization_server_metadata::v1::Response::new(metadata.cast_unchecked()))
}
pub(crate) async fn authorization_server_metadata(services: &Services) -> Value {
let endpoint_base = services
.config
.get_client_domain()
.join(super::BASE_PATH)
.unwrap();
json!({
"account_management_uri": endpoint_base.join(ACCOUNT_MANAGEMENT_PATH).unwrap(),
"account_management_actions_supported": [
AccountManagementAction::AccountDeactivate,
AccountManagementAction::CrossSigningReset,
AccountManagementAction::DeviceDelete,
AccountManagementAction::DeviceView,
AccountManagementAction::DevicesList,
AccountManagementAction::Profile,
],
"authorization_endpoint": endpoint_base.join(AUTH_CODE_PATH).unwrap(),
"code_challenge_methods_supported": ["S256"],
"grant_types_supported": ["authorization_code", "refresh_token"],
"issuer": services.config.get_client_domain(),
"jwks_uri": endpoint_base.join(JWKS_URI_PATH).unwrap(),
"prompt_values_supported": ["create"],
"registration_endpoint": endpoint_base.join(CLIENT_REGISTER_PATH).unwrap(),
"response_modes_supported": ["query", "fragment"],
"response_types_supported": ["code"],
"revocation_endpoint": endpoint_base.join(TOKEN_REVOKE_PATH).unwrap(),
"token_endpoint": endpoint_base.join(TOKEN_PATH).unwrap(),
})
}
+23
View File
@@ -0,0 +1,23 @@
use axum::{Form, Json, extract::State, response::IntoResponse};
use http::StatusCode;
use service::oauth::grant::{RevokeTokenRequest, TokenRequest};
pub(crate) async fn token_route(
State(services): State<crate::State>,
Form(request): Form<TokenRequest>,
) -> impl IntoResponse {
match services.oauth.issue_token(request).await {
| Ok(response) => Ok(Json(response)),
| Err(err) => Err((StatusCode::BAD_REQUEST, err.message())),
}
}
pub(crate) async fn revoke_token_route(
State(services): State<crate::State>,
Form(request): Form<RevokeTokenRequest>,
) -> impl IntoResponse {
match services.oauth.revoke_token(request.token).await {
| Ok(()) => Ok(StatusCode::OK),
| Err(err) => Err((StatusCode::BAD_REQUEST, err.message())),
}
}
+4 -15
View File
@@ -23,8 +23,7 @@
/// # `GET /_matrix/client/v3/profile/{userId}`
///
/// Returns the displayname, avatar_url, blurhash, and custom profile fields of
/// the user.
/// Returns the user's profile information.
///
/// - If user is on another server and we do not have a local copy already,
/// fetch profile over federation.
@@ -322,19 +321,9 @@ async fn set_profile_field(
services.users.set_avatar_url(user_id, None);
},
| other =>
if other.field_name().as_str() == "blurhash" {
if let Some(Value::String(blurhash)) = other.value() {
services.users.set_blurhash(user_id, Some(blurhash));
} else {
services.users.set_blurhash(user_id, None);
}
} else {
services.users.set_profile_key(
user_id,
other.field_name().as_str(),
other.value(),
);
},
services
.users
.set_profile_key(user_id, other.field_name().as_str(), other.value()),
}
// If the user is local and changed their displayname or avatar_url, update it
+1 -5
View File
@@ -288,7 +288,6 @@ pub(crate) async fn create_room_route(
let mut join_event = RoomMemberEventContent::new(MembershipState::Join);
join_event.displayname = services.users.displayname(sender_user).await.ok();
join_event.avatar_url = services.users.avatar_url(sender_user).await.ok();
join_event.blurhash = services.users.blurhash(sender_user).await.ok();
join_event.is_direct = Some(body.is_direct);
debug_info!("Joining {sender_user} to room {room_id}");
@@ -537,10 +536,7 @@ pub(crate) async fn create_room_route(
if services.server.config.admin_room_notices {
services
.admin
.send_text(&format!(
"{sender_user} made {} public to the room directory",
&room_id
))
.send_text(&format!("{sender_user} made {room_id} public to the room directory"))
.await;
}
info!("{sender_user} made {0} public to the room directory", &room_id);
-1
View File
@@ -271,7 +271,6 @@ pub(crate) async fn upgrade_room_route(
&assign!(RoomMemberEventContent::new(MembershipState::Join), {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
}),
),
sender_user,
+19 -135
View File
@@ -4,10 +4,9 @@
use axum_client_ip::ClientIp;
use conduwuit::{
Err, Result, debug, err, info,
utils::{self, ReadyExt, hash, stream::BroadbandExt},
utils::{self, ReadyExt, stream::BroadbandExt},
warn,
};
use conduwuit_core::{debug_error, debug_warn};
use conduwuit_service::Services;
use futures::StreamExt;
use lettre::Address;
@@ -30,7 +29,6 @@
},
assign,
};
use service::uiaa::Identity;
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::Ruma;
@@ -45,6 +43,12 @@ pub(crate) async fn get_login_types_route(
ClientIp(client): ClientIp,
_body: Ruma<get_login_types::v3::Request>,
) -> Result<get_login_types::v3::Response> {
if !services.config.oauth.compatibility_mode.uiaa_available() {
return Err!(Request(Unrecognized(
"User-interactive authentication is not available on this server."
)));
}
Ok(get_login_types::v3::Response::new(vec![
get_login_types::v3::LoginType::Password(PasswordLoginType::default()),
get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()),
@@ -54,114 +58,7 @@ pub(crate) async fn get_login_types_route(
]))
}
/// Authenticates the given user by its ID and its password.
///
/// Returns the user ID if successful, and an error otherwise.
#[tracing::instrument(skip_all, fields(%user_id), name = "password", level = "debug")]
pub(crate) async fn password_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
// Restrict login to accounts only of type 'password', including untyped
// legacy accounts which are equivalent to 'password'.
if services
.users
.origin(user_id)
.await
.is_ok_and(|origin| origin != "password")
{
return Err!(Request(Forbidden("Account does not permit password login.")));
}
let (hash, user_id) = match services.users.password_hash(user_id).await {
| Ok(hash) => (hash, user_id),
| Err(_) => services
.users
.password_hash(lowercased_user_id)
.await
.map(|hash| (hash, lowercased_user_id))
.map_err(|_| err!(Request(Forbidden("Invalid identifier or password."))))?,
};
if hash.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash)
.inspect_err(|e| debug_error!("{e}"))
.map_err(|_| err!(Request(Forbidden("Invalid identifier or password."))))?;
Ok(user_id.to_owned())
}
/// Authenticates the given user through the configured LDAP server.
///
/// Creates the user if the user is found in the LDAP and do not already have an
/// account.
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap", level = "debug")]
pub(super) async fn ldap_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
| Some(bind_dn) if bind_dn.contains("{username}") =>
(bind_dn.replace("{username}", lowercased_user_id.localpart()), None),
| _ => {
debug!("Searching user in LDAP");
let dns = services.users.search_ldap(user_id).await?;
if dns.len() >= 2 {
return Err!(Ldap("LDAP search returned two or more results"));
}
let Some((user_dn, is_admin)) = dns.first() else {
return password_login(services, user_id, lowercased_user_id, password).await;
};
(user_dn.clone(), *is_admin)
},
};
let user_id = services
.users
.auth_ldap(&user_dn, password)
.await
.map(|()| lowercased_user_id.to_owned())?;
// LDAP users are automatically created on first login attempt. This is a very
// common feature that can be seen on many services using a LDAP provider for
// their users (synapse, Nextcloud, Jellyfin, ...).
//
// LDAP users are crated with a dummy password but non empty because an empty
// password is reserved for deactivated accounts. The conduwuit password field
// will never be read to login a LDAP user so it's not an issue.
if !services.users.exists(lowercased_user_id).await {
services
.users
.create(lowercased_user_id, Some("*"), Some("ldap"))
.await?;
}
// Only sync admin status if LDAP can actually determine it.
// None means LDAP cannot determine admin status (manual config required).
if let Some(is_ldap_admin) = is_ldap_admin {
let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;
if is_ldap_admin && !is_conduwuit_admin {
Box::pin(services.admin.make_user_admin(lowercased_user_id)).await?;
} else if !is_ldap_admin && is_conduwuit_admin {
Box::pin(services.admin.revoke_admin(lowercased_user_id)).await?;
}
}
Ok(user_id)
}
pub(crate) async fn handle_login(
pub async fn handle_login(
services: &Services,
identifier: Option<&UserIdentifier>,
password: &str,
@@ -191,15 +88,7 @@ pub(crate) async fn handle_login(
UserId::parse_with_server_name(user_id_or_localpart, &services.config.server_name)
.map_err(|_| err!(Request(InvalidUsername("User ID is malformed"))))?;
let lowercased_user_id = UserId::parse_with_server_name(
user_id.localpart().to_lowercase(),
&services.config.server_name,
)
.unwrap();
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
if !services.globals.user_is_local(&user_id) {
return Err!(Request(InvalidParam("User ID does not belong to this homeserver")));
}
@@ -212,18 +101,7 @@ pub(crate) async fn handle_login(
return Err!(Request(Forbidden("This account is not permitted to log in.")));
}
if cfg!(feature = "ldap") && services.config.ldap.enable {
match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
| Ok(user_id) => Ok(user_id),
| Err(err) if services.config.ldap.ldap_only => Err(err),
| Err(err) => {
debug_warn!("{err}");
password_login(services, &user_id, &lowercased_user_id, password).await
},
}
} else {
password_login(services, &user_id, &lowercased_user_id, password).await
}
services.users.check_password(&user_id, password).await
}
/// # `POST /_matrix/client/v3/login`
@@ -246,10 +124,15 @@ pub(crate) async fn login_route(
ClientIp(client): ClientIp,
body: Ruma<login::v3::Request>,
) -> Result<login::v3::Response> {
if !services.config.oauth.compatibility_mode.uiaa_available() {
return Err!(Request(Unrecognized(
"User-interactive authentication is not available on this server."
)));
}
let emergency_mode_enabled = services.config.emergency_password.is_some();
// Validate login method
// TODO: Other login methods
let user_id = match &body.login_info {
#[allow(deprecated)]
| login::v3::LoginInfo::Password(login::v3::Password {
@@ -330,7 +213,7 @@ pub(crate) async fn login_route(
if device_exists {
services
.users
.set_token(&user_id, &device_id, &token)
.set_token(&user_id, &device_id, &token, None)
.await?;
} else {
services
@@ -339,6 +222,7 @@ pub(crate) async fn login_route(
&user_id,
&device_id,
&token,
None,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
@@ -386,7 +270,7 @@ pub(crate) async fn login_token_route(
// Prompt the user to confirm with their password using UIAA
let _ = services
.uiaa
.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
.authenticate_password(&body.auth, sender_user, body.sender_device(), None)
.await?;
let login_token = utils::random_string(TOKEN_LENGTH);
+7
View File
@@ -69,6 +69,12 @@ pub(super) async fn load_joined_room(
and `join*` functions are used to perform steps in parallel which do not depend on each other.
*/
let insert_lock = services
.rooms
.timeline
.mutex_insert
.lock(room_id.as_str())
.await;
let (
account_data,
ephemeral,
@@ -86,6 +92,7 @@ pub(super) async fn load_joined_room(
)
.boxed()
.await?;
drop(insert_lock);
if !timeline.is_empty() || !state_events.is_empty() {
trace!(
+8 -3
View File
@@ -395,6 +395,10 @@ pub(crate) async fn build_sync_events(
.users
.count_one_time_keys(syncing_user, syncing_device);
let unused_fallback_key_types = services
.users
.list_unused_fallback_key_types(syncing_user, syncing_device);
let (
(joined_rooms, mut device_list_updates),
left_rooms,
@@ -405,6 +409,7 @@ pub(crate) async fn build_sync_events(
to_device_events,
keys_changed,
device_one_time_keys_count,
unused_fallback_key_types,
) = async {
futures::join!(
joined_rooms,
@@ -415,7 +420,8 @@ pub(crate) async fn build_sync_events(
account_data,
to_device_events,
keys_changed,
device_one_time_keys_count
device_one_time_keys_count,
unused_fallback_key_types,
)
}
.boxed()
@@ -433,8 +439,7 @@ pub(crate) async fn build_sync_events(
account_data: assign!(GlobalAccountData::new(), { events: account_data }),
device_lists: device_list_updates.into(),
device_one_time_keys_count,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
device_unused_fallback_key_types: Some(unused_fallback_key_types),
presence: assign!(Presence::new(), {
events: presence_updates
.into_iter()
-1
View File
@@ -69,7 +69,6 @@ pub(crate) async fn sync_events_v5_route(
ClientIp(client_ip): ClientIp,
body: Ruma<sync_events::v5::Request>,
) -> Result<sync_events::v5::Response> {
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
let ref sender_user = body.sender_user().to_owned();
let ref sender_device = body.sender_device().to_owned();
+11 -47
View File
@@ -1,7 +1,8 @@
use std::collections::BTreeMap;
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::Result;
use conduwuit::{
Result,
matrix::versions::{unstable_features, versions},
};
use futures::StreamExt;
use ruma::{api::client::discovery::get_supported_versions, assign};
@@ -22,47 +23,10 @@
pub(crate) async fn get_supported_versions_route(
_body: Ruma<get_supported_versions::Request>,
) -> Result<get_supported_versions::Response> {
let versions = vec![
"r0.0.1".to_owned(),
"r0.1.0".to_owned(),
"r0.2.0".to_owned(),
"r0.3.0".to_owned(),
"r0.4.0".to_owned(),
"r0.5.0".to_owned(),
"r0.6.0".to_owned(),
"r0.6.1".to_owned(),
"v1.1".to_owned(),
"v1.2".to_owned(),
"v1.3".to_owned(),
"v1.4".to_owned(),
"v1.5".to_owned(),
"v1.8".to_owned(),
"v1.11".to_owned(),
"v1.12".to_owned(),
"v1.13".to_owned(),
"v1.14".to_owned(),
];
let unstable_features = BTreeMap::from_iter([
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc2285.stable".to_owned(), true), /* private read receipts (https://github.com/matrix-org/matrix-spec-proposals/pull/2285) */
("uk.half-shot.msc2666.query_mutual_rooms".to_owned(), true), /* query mutual rooms (https://github.com/matrix-org/matrix-spec-proposals/pull/2666) */
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
("org.matrix.msc3814".to_owned(), true), /* dehydrated devices */
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */
("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */
("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
("org.matrix.msc4155".to_owned(), true), /* invite filtering (https://github.com/matrix-org/matrix-spec-proposals/pull/4155) */
]);
Ok(assign!(get_supported_versions::Response::new(versions), { unstable_features }))
Ok(assign!(
get_supported_versions::Response::new(versions()),
{ unstable_features: unstable_features() }
))
}
/// # `GET /_conduwuit/server_version`
@@ -71,8 +35,8 @@ pub(crate) async fn get_supported_versions_route(
/// `/_matrix/federation/v1/version`
pub(crate) async fn conduwuit_server_version() -> Result<impl IntoResponse> {
Ok(Json(serde_json::json!({
"name": conduwuit::version::name(),
"version": conduwuit::version::version(),
"name": conduwuit::BRANDING,
"version": conduwuit::version(),
})))
}
@@ -80,7 +44,7 @@ pub(crate) async fn conduwuit_server_version() -> Result<impl IntoResponse> {
///
/// conduwuit-specific API to return the amount of users registered on this
/// homeserver. Endpoint is disabled if federation is disabled for privacy. This
/// only includes active users (not deactivated, no guests, etc)
/// only includes active users (not deactivated, etc)
pub(crate) async fn conduwuit_local_user_count(
State(services): State<crate::State>,
) -> Result<impl IntoResponse> {
+4 -9
View File
@@ -2,7 +2,7 @@
use conduwuit::{Err, Result};
use ruma::{
api::client::discovery::{
discover_homeserver::{self, HomeserverInfo, RtcFocusInfo},
discover_homeserver::{self, HomeserverInfo},
discover_support::{self, Contact, ContactRole},
},
assign,
@@ -31,10 +31,8 @@ pub(crate) async fn well_known_client(
rtc_foci: services
.config
.matrix_rtc
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
.into_iter()
.map(|focus| RtcFocusInfo::new(focus.transport_type(), focus.data().into_owned()).unwrap())
.collect()
.foci
.clone()
}))
}
@@ -48,10 +46,7 @@ pub(crate) async fn get_rtc_transports(
_body: Ruma<ruma::api::client::rtc::transports::v1::Request>,
) -> Result<ruma::api::client::rtc::transports::v1::Response> {
Ok(ruma::api::client::rtc::transports::v1::Response::new(
services
.config
.matrix_rtc
.effective_foci(&services.config.well_known.rtc_focus_server_urls),
services.config.matrix_rtc.foci.clone(),
))
}
+1
View File
@@ -1,5 +1,6 @@
#![type_length_limit = "16384"] //TODO: reduce me
#![allow(clippy::toplevel_ref_arg)]
#![recursion_limit = "256"]
extern crate conduwuit_core as conduwuit;
extern crate conduwuit_service as service;
+5 -3
View File
@@ -10,7 +10,7 @@
response::{IntoResponse, Redirect},
routing::{any, get, post},
};
use conduwuit::{Server, err};
use conduwuit::err;
pub(super) use conduwuit_service::state::State;
use http::{Uri, uri};
@@ -18,8 +18,8 @@
pub(super) use self::{args::Args as Ruma, response::RumaResponse};
use crate::{admin, client, server};
pub fn build(router: Router<State>, server: &Server) -> Router<State> {
let config = &server.config;
pub fn build(router: Router<State>, state: State) -> Router<State> {
let config = &state.server.config;
let mut router = router
.ruma_route(&client::appservice_ping)
.ruma_route(&client::get_supported_versions_route)
@@ -185,6 +185,8 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&client::well_known_client)
.ruma_route(&client::get_rtc_transports)
.ruma_route(&client::room_initial_sync_route)
.ruma_route(&client::get_authorization_server_metadata_route)
.merge(client::oauth::router(state))
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
.route("/_continuwuity/server_version", get(client::conduwuit_server_version))
.ruma_route(&admin::rooms::ban::ban_room)
+23 -6
View File
@@ -1,6 +1,7 @@
use std::any::{Any, TypeId};
use conduwuit::{Err, Result, err};
use conduwuit::{Err, Error, Result, err};
use http::StatusCode;
use ruma::{
OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
api::{
@@ -9,12 +10,15 @@
AccessToken, AccessTokenOptional, AppserviceToken, AppserviceTokenOptional,
AuthScheme, NoAccessToken, NoAuthentication,
},
error::{ErrorKind, UnknownTokenErrorData},
federation::authentication::ServerSignatures,
},
assign,
};
use service::{
Services,
server_keys::{PubKeyMap, PubKeys},
users::AccessTokenStatus,
};
use crate::{router::args::AuthQueryParams, service::appservice::RegistrationInfo};
@@ -103,12 +107,21 @@ async fn verify<B: AsRef<[u8]> + Sync>(
query: AuthQueryParams,
route: TypeId,
) -> Result<Auth> {
// Check for appservice tokens first
let (sender_user, sender_device, appservice_info) = {
if let Ok((sender_user, sender_device)) =
if let Some((sender_user, sender_device, status)) =
services.users.find_from_token(&output).await
{
// If the token is expired we return a soft logout
if matches!(status, AccessTokenStatus::Expired) {
return Err(Error::Request(
ErrorKind::UnknownToken(
assign!(UnknownTokenErrorData::new(), { soft_logout: true }),
),
"This token has expired".into(),
StatusCode::UNAUTHORIZED,
));
}
// Locked users can only use /logout and /logout/all
if services
.users
@@ -120,7 +133,7 @@ async fn verify<B: AsRef<[u8]> + Sync>(
|| route
== TypeId::of::<ruma::api::client::session::logout_all::v3::Request>(
)) {
return Err!(Request(Unauthorized("Your account is locked.")));
return Err!(Request(UserLocked("Your account is locked.")));
}
}
@@ -168,7 +181,11 @@ async fn verify<B: AsRef<[u8]> + Sync>(
(Some(sender_user), sender_device, Some(appservice_info))
} else {
return Err!(Request(Unauthorized("Invalid access token.")));
return Err(Error::Request(
ErrorKind::UnknownToken(UnknownTokenErrorData::new()),
"Invalid token".into(),
StatusCode::UNAUTHORIZED,
));
}
};
+2 -2
View File
@@ -11,8 +11,8 @@ pub(crate) async fn get_server_version_route(
) -> Result<get_server_version::v1::Response> {
Ok(assign!(get_server_version::v1::Response::new(), {
server: Some(assign!(get_server_version::v1::Server::new(), {
name: Some(conduwuit::version::name().into()),
version: Some(conduwuit::version::version().into()),
name: Some(conduwuit::BRANDING.into()),
version: Some(conduwuit::version().into()),
})),
}))
}
+1
View File
@@ -70,6 +70,7 @@ conduwuit-build-metadata.workspace = true
const-str.workspace = true
core_affinity.workspace = true
ctor.workspace = true
dtor.workspace = true
cyborgtime.workspace = true
either.workspace = true
figment.workspace = true
+1 -1
View File
@@ -47,7 +47,7 @@
const NAME_MAX: usize = 128;
const KEY_SEGS: usize = 8;
#[ctor::ctor]
#[ctor::ctor(unsafe)]
fn _static_initialization() {
acq_epoch().expect("pre-initialization of jemalloc failed");
acq_epoch().expect("pre-initialization of jemalloc failed");
+119 -256
View File
@@ -4,7 +4,7 @@
pub mod proxy;
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
collections::{BTreeMap, BTreeSet},
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
path::PathBuf,
};
@@ -20,10 +20,7 @@
use regex::RegexSet;
use ruma::{
OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId,
api::client::{
discovery::{discover_homeserver::RtcFocusInfo, discover_support::ContactRole},
rtc::transports::v1::RtcTransport,
},
api::client::{discovery::discover_support::ContactRole, rtc::RtcTransport},
};
use serde::{Deserialize, Serialize, de::IgnoredAny};
use url::Url;
@@ -371,6 +368,7 @@ pub struct Config {
pub ip_lookup_strategy: u8,
/// Max request size for file uploads in bytes. Defaults to 20MB.
/// Also limits incoming federated media.
///
/// default: 20971520
#[serde(default = "default_max_request_size")]
@@ -658,19 +656,25 @@ pub struct Config {
/// even if `recaptcha_site_key` is set.
pub recaptcha_private_site_key: Option<String>,
/// Policy documents, such as terms and conditions or a privacy policy,
/// which users must agree to when registering an account.
///
/// Example:
/// ```ignore
/// [global.registration_terms.privacy_policy]
/// en = { name = "Privacy Policy", url = "https://homeserver.example/en/privacy_policy.html" }
/// es = { name = "Política de Privacidad", url = "https://homeserver.example/es/privacy_policy.html" }
/// ```
///
/// default: {}
/// display: nested
#[serde(default)]
pub registration_terms: HashMap<String, HashMap<String, TermsDocument>>,
pub registration_terms: RegistrationTerms,
/// display: nested
#[serde(default)]
pub oauth: OauthConfig,
/// Controls whether users are allowed to deactivate their own accounts
/// through the account management panel or their Matrix clients. Server
/// admins can always deactivate users using the relevant admin commands.
///
/// Note that, in some jurisdictions, you may be legally required to honor
/// users who request to deactivate their accounts if you set this option
/// to `false`.
///
/// default: true
#[serde(default = "true_fn")]
pub allow_deactivation: bool,
/// Controls whether encrypted rooms and events are allowed.
#[serde(default = "true_fn")]
@@ -1485,21 +1489,6 @@ pub struct Config {
#[serde(default)]
pub brotli_compression: bool,
/// Set to true to allow user type "guest" registrations. Some clients like
/// Element attempt to register guest users automatically.
#[serde(default)]
pub allow_guest_registration: bool,
/// Set to true to log guest registrations in the admin room. Note that
/// these may be noisy or unnecessary if you're a public homeserver.
#[serde(default)]
pub log_guest_registrations: bool,
/// Set to true to allow guest registrations/users to auto join any rooms
/// specified in `auto_join_rooms`.
#[serde(default)]
pub allow_guests_auto_join_rooms: bool,
/// Enable the legacy unauthenticated Matrix media repository endpoints.
/// These endpoints consist of:
/// - /_matrix/media/*/config
@@ -2080,12 +2069,10 @@ pub struct Config {
pub stream_amplification: usize,
/// Number of sender task workers; determines sender parallelism. Default is
/// '0' which means the value is determined internally, likely matching the
/// number of tokio worker-threads or number of cores, etc. Override by
/// setting a non-zero value.
/// core count. Override by setting a different value.
///
/// default: 0
#[serde(default)]
/// default: core count
#[serde(default = "default_sender_workers")]
pub sender_workers: usize,
/// Enables listener sockets; can be set to false to disable listening. This
@@ -2129,19 +2116,11 @@ pub struct Config {
#[serde(default)]
pub allow_web_indexing: bool,
/// display: nested
#[serde(default)]
pub ldap: LdapConfig,
/// Configuration for antispam support
/// display: nested
#[serde(default)]
pub antispam: Option<Antispam>,
/// display: nested
#[serde(default)]
pub blurhashing: BlurhashConfig,
/// Configuration for MatrixRTC (MSC4143) transport discovery.
/// display: nested
#[serde(default)]
@@ -2215,43 +2194,6 @@ pub struct WellKnownConfig {
/// PGP key URI for server support contacts, to be served as part of the
/// MSC1929 server support endpoint.
pub support_pgp_key: Option<String>,
/// **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
///
/// A list of MatrixRTC foci URLs which will be served as part of the
/// MSC4143 client endpoint at /.well-known/matrix/client.
///
/// This option is deprecated and will be removed in a future release.
/// Please migrate to the new `[global.matrix_rtc]` config section.
///
/// default: []
#[serde(default)]
pub rtc_focus_server_urls: Vec<RtcFocusInfo>,
}
#[derive(Clone, Copy, Debug, Deserialize, Default)]
#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")]
pub struct BlurhashConfig {
/// blurhashing x component, 4 is recommended by https://blurha.sh/
///
/// default: 4
#[serde(default = "default_blurhash_x_component")]
pub components_x: u32,
/// blurhashing y component, 3 is recommended by https://blurha.sh/
///
/// default: 3
#[serde(default = "default_blurhash_y_component")]
pub components_y: u32,
/// Max raw size that the server will blurhash, this is the size of the
/// image after converting it to raw data, it should be higher than the
/// upload limit but not too high. The higher it is the higher the
/// potential load will be for clients requesting blurhashes. The default
/// is 33.55MB. Setting it to 0 disables blurhashing.
///
/// default: 33554432
#[serde(default = "default_blurhash_max_raw_size")]
pub blurhash_max_raw_size: u64,
}
#[derive(Clone, Debug, Deserialize, Default)]
@@ -2275,145 +2217,6 @@ pub struct MatrixRtcConfig {
pub foci: Vec<RtcTransport>,
}
impl MatrixRtcConfig {
/// Returns the effective foci, falling back to the deprecated
/// `rtc_focus_server_urls` if the new config is empty.
#[must_use]
pub fn effective_foci(&self, deprecated_foci: &[RtcFocusInfo]) -> Vec<RtcTransport> {
if !self.foci.is_empty() {
self.foci.clone()
} else {
deprecated_foci
.iter()
.map(|focus| {
RtcTransport::new(focus.focus_type().to_owned(), focus.data().into_owned())
.unwrap()
})
.collect()
}
}
}
#[derive(Clone, Debug, Default, Deserialize)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
pub struct LdapConfig {
/// Whether to enable LDAP login.
///
/// example: "true"
#[serde(default)]
pub enable: bool,
/// Whether to force LDAP authentication or authorize classical password
/// login.
///
/// example: "true"
#[serde(default)]
pub ldap_only: bool,
/// URI of the LDAP server.
///
/// example: "ldap://ldap.example.com:389"
///
/// default: ""
#[serde(default)]
pub uri: Option<Url>,
/// StartTLS for LDAP connections.
///
/// default: false
#[serde(default)]
pub use_starttls: bool,
/// Skip TLS certificate verification, possibly dangerous.
///
/// default: false
#[serde(default)]
pub disable_tls_verification: bool,
/// Root of the searches.
///
/// example: "ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub base_dn: String,
/// Bind DN if anonymous search is not enabled.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username. In such case, the password used to bind will be the
/// one provided for the login and not the one given by
/// `bind_password_file`. Beware: automatically granting admin rights will
/// not work if you use this direct bind instead of a LDAP search.
///
/// example: "cn=ldap-reader,dc=example,dc=org" or
/// "cn={username},ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub bind_dn: Option<String>,
/// Path to a file on the system that contains the password for the
/// `bind_dn`.
///
/// The server must be able to access the file, and it must not be empty.
///
/// default: ""
#[serde(default)]
pub bind_password_file: Option<PathBuf>,
/// Search filter to limit user searches.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username for more complex filters.
///
/// example: "(&(objectClass=person)(memberOf=matrix))"
///
/// default: "(objectClass=*)"
#[serde(default = "default_ldap_search_filter")]
pub filter: String,
/// Attribute to use to uniquely identify the user.
///
/// example: "uid" or "cn"
///
/// default: "uid"
#[serde(default = "default_ldap_uid_attribute")]
pub uid_attribute: String,
/// Attribute containing the display name of the user.
///
/// example: "givenName" or "sn"
///
/// default: "givenName"
#[serde(default = "default_ldap_name_attribute")]
pub name_attribute: String,
/// Root of the searches for admin users.
///
/// Defaults to `base_dn` if empty.
///
/// example: "ou=admins,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub admin_base_dn: String,
/// The LDAP search filter to find administrative users for continuwuity.
///
/// If left blank, administrative state must be configured manually for each
/// user.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username for more complex filters.
///
/// example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
///
/// default: ""
#[serde(default)]
pub admin_filter: String,
}
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
@@ -2537,6 +2340,30 @@ pub struct SmtpConfig {
pub require_email_for_token_registration: bool,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[config_example_generator(
filename = "conduwuit-example.toml",
section = "global.registration-terms",
optional = "true"
)]
pub struct RegistrationTerms {
/// The language code to provide to clients along with the policy documents.
///
/// default: "en"
pub language: String,
/// Policy documents, such as terms and conditions or a privacy policy,
/// which users must agree to when registering an account.
///
/// Example:
/// ```ignore
/// [global.registration_terms.documents]
/// privacy_policy = { name = "Privacy Policy", url = "https://homeserver.example/en/privacy_policy.html" }
/// ```
///
/// default: {}
pub documents: BTreeMap<String, TermsDocument>,
}
/// A policy document for use with a m.login.terms stage.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TermsDocument {
@@ -2544,6 +2371,43 @@ pub struct TermsDocument {
pub url: String,
}
#[derive(Clone, Debug, Default, Deserialize)]
#[config_example_generator(
filename = "conduwuit-example.toml",
section = "global.oauth",
optional = "true"
)]
pub struct OauthConfig {
/// The compatibility mode to use for OAuth.
///
/// - "disabled": OAuth will be unavailable. Users will only be able to log
/// in using legacy authentication.
/// - "hybrid": OAuth and legacy authentication will both be available. Some
/// clients may only use one or the other.
/// - "exclusive": Only OAuth will be available. Clients which require
/// legacy authentication will be unable to log in.
///
/// default: "hybrid"
pub compatibility_mode: OAuthMode,
}
#[derive(Clone, Debug, Default, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OAuthMode {
Disabled,
#[default]
Hybrid,
Exclusive,
}
impl OAuthMode {
#[must_use]
pub fn uiaa_available(&self) -> bool { matches!(self, Self::Disabled | Self::Hybrid) }
#[must_use]
pub fn oauth_available(&self) -> bool { matches!(self, Self::Hybrid | Self::Exclusive) }
}
const DEPRECATED_KEYS: &[&str] = &[
"cache_capacity",
"conduit_cache_capacity_modifier",
@@ -2641,45 +2505,47 @@ fn default_database_backups_to_keep() -> i16 { 1 }
fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }
fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) }
fn default_cache_capacity_modifier() -> f64 { 1.0 }
fn default_auth_chain_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
}
fn default_shorteventid_cache_capacity() -> u32 {
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_shorteventid_cache_capacity() -> u32 {
parallelism_scaled_u32(100_000).saturating_add(100_000)
}
fn default_eventidshort_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_eventid_pdu_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_shortstatekey_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_statekeyshort_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_servernameevent_data_cache_capacity() -> u32 {
parallelism_scaled_u32(100_000).saturating_add(500_000)
parallelism_scaled_u32(100_000).saturating_add(100_000)
}
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(500).clamp(100, 12000) }
fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
parallelism_scaled_u32(500).clamp(100, 12000)
}
fn default_dns_cache_entries() -> u32 { 32768 }
fn default_dns_cache_entries() -> u32 { 327_680 }
fn default_dns_min_ttl() -> u64 { 60 * 180 }
@@ -2887,15 +2753,26 @@ fn default_admin_log_capture() -> String {
fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }
#[must_use]
#[allow(clippy::as_conversions, clippy::cast_precision_loss)]
fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
fn parallelism_scaled_u32(val: u32) -> u32 {
let val = val.try_into().expect("failed to cast u32 to usize");
parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
#[must_use]
#[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
pub fn parallelism_scaled_u32(val: u32) -> u32 {
val.saturating_mul(sys::available_parallelism() as u32)
}
fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
#[must_use]
#[allow(clippy::as_conversions, clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
pub fn parallelism_scaled_i32(val: i32) -> i32 {
val.saturating_mul(sys::available_parallelism() as i32)
}
#[must_use]
pub fn parallelism_scaled(val: usize) -> usize {
val.saturating_mul(sys::available_parallelism())
}
fn default_trusted_server_batch_size() -> usize { 256 }
@@ -2915,6 +2792,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }
fn default_stream_amplification() -> usize { 1024 }
fn default_sender_workers() -> usize { parallelism_scaled(1) }
fn default_client_receive_timeout() -> u64 { 75 }
fn default_client_request_timeout() -> u64 { 180 }
@@ -2924,19 +2803,3 @@ fn default_client_response_timeout() -> u64 { 120 }
fn default_client_shutdown_timeout() -> u64 { 15 }
fn default_sender_shutdown_timeout() -> u64 { 5 }
// blurhashing defaults recommended by https://blurha.sh/
// 2^25
pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 }
pub(super) fn default_blurhash_x_component() -> u32 { 4 }
pub(super) fn default_blurhash_y_component() -> u32 { 3 }
// end recommended & blurhashing defaults
fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
fn default_ldap_uid_attribute() -> String { String::from("uid") }
fn default_ldap_name_attribute() -> String { String::from("givenName") }
+1 -1
View File
@@ -62,7 +62,7 @@ macro_rules! debug_info {
pub static DEBUGGER: LazyLock<bool> =
LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb"));
#[cfg_attr(debug_assertions, ctor::ctor)]
#[cfg_attr(debug_assertions, ctor::ctor(unsafe))]
#[cfg_attr(not(debug_assertions), allow(dead_code))]
fn set_panic_trap() {
if !*DEBUGGER {
+2 -3
View File
@@ -110,8 +110,6 @@ pub enum Error {
InconsistentRoomState(&'static str, ruma::OwnedRoomId),
#[error(transparent)]
IntoHttp(#[from] ruma::api::error::IntoHttpError),
#[error("{0}")]
Ldap(Cow<'static, str>),
#[error(transparent)]
Mxc(#[from] ruma::MxcUriError),
#[error(transparent)]
@@ -160,6 +158,7 @@ pub fn message(&self) -> String {
match self {
| Self::Federation(origin, error) => format!("Answer from {origin}: {error}"),
| Self::Ruma(error) => response::ruma_error_message(error),
| Self::Request(_, message, _) => message.clone().into_owned(),
| _ => format!("{self}"),
}
}
@@ -262,7 +261,7 @@ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{real_error}")
}
} else {
write!(f, "Request error: {}", &self.0)
write!(f, "Request error: {}", self.0)
}
}
}
+1 -4
View File
@@ -73,11 +73,8 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode {
// 413
| TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
// 405
| Unrecognized => StatusCode::METHOD_NOT_ALLOWED,
// 404
| NotFound => StatusCode::NOT_FOUND,
| Unrecognized | NotFound => StatusCode::NOT_FOUND,
// 403
| GuestAccessForbidden
+6 -9
View File
@@ -7,19 +7,16 @@
use std::sync::OnceLock;
static BRANDING: &str = "continuwuity";
static WEBSITE: &str = "https://continuwuity.org";
static SEMANTIC: &str = env!("CARGO_PKG_VERSION");
pub const BRANDING: &str = "continuwuity";
pub const ROUTE_PREFIX: &str = "/_continuwuity";
pub const WEBSITE: &str = "https://continuwuity.org";
pub const SEMANTIC: &str = env!("CARGO_PKG_VERSION");
static VERSION: OnceLock<String> = OnceLock::new();
static VERSION_UA: OnceLock<String> = OnceLock::new();
static USER_AGENT: OnceLock<String> = OnceLock::new();
static USER_AGENT_MEDIA: OnceLock<String> = OnceLock::new();
#[inline]
#[must_use]
pub fn name() -> &'static str { BRANDING }
#[inline]
pub fn version() -> &'static str { VERSION.get_or_init(init_version) }
@@ -32,10 +29,10 @@ pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) }
#[inline]
pub fn user_agent_media() -> &'static str { USER_AGENT_MEDIA.get_or_init(init_user_agent_media) }
fn init_user_agent() -> String { format!("{}/{} (bot; +{WEBSITE})", name(), version_ua()) }
fn init_user_agent() -> String { format!("{BRANDING}/{} (bot; +{WEBSITE})", version_ua()) }
fn init_user_agent_media() -> String {
format!("{}/{} (embedbot; facebookexternalhit/1.1; +{WEBSITE})", name(), version_ua())
format!("{BRANDING}/{} (embedbot; facebookexternalhit/1.1; +{WEBSITE})", version_ua())
}
fn init_version_ua() -> String {
+1 -2
View File
@@ -337,8 +337,7 @@ pub async fn auth_check<E, F, Fut>(
// If the create event content has the field m.federate set to false and the
// sender domain of the event does not match the sender domain of the create
// event, reject.
if !(room_version.room_id_format == RoomIdFormatVersion::V2)
&& !room_create_content.federate
if !room_create_content.federate
&& room_create_event.sender().server_name() != incoming_event.sender().server_name()
{
warn!(
+1 -4
View File
@@ -34,10 +34,7 @@ macro_rules! mod_dtor {
pub use conduwuit_build_metadata as build_metadata;
pub use config::Config;
pub use error::Error;
pub use info::{
version,
version::{name, version},
};
pub use info::version::*;
pub use matrix::{Event, EventTypeExt, Pdu, PduCount, PduEvent, PduId, pdu, state_res};
pub use parking_lot::{Mutex as SyncMutex, RwLock as SyncRwLock};
pub use server::Server;
+3
View File
@@ -5,6 +5,7 @@
/// Sha256 hash (input gather joined by 0xFF bytes)
#[must_use]
#[tracing::instrument(skip(inputs), level = "trace")]
#[allow(clippy::unnecessary_fallible_conversions)]
pub fn delimited<'a, T, I>(mut inputs: I) -> DigestOut
where
I: Iterator<Item = T> + 'a,
@@ -25,6 +26,7 @@ pub fn delimited<'a, T, I>(mut inputs: I) -> DigestOut
/// Sha256 hash (input gather)
#[must_use]
#[tracing::instrument(skip(inputs), level = "trace")]
#[allow(clippy::unnecessary_fallible_conversions)]
pub fn concat<'a, T, I>(inputs: I) -> DigestOut
where
I: Iterator<Item = T> + 'a,
@@ -43,6 +45,7 @@ pub fn concat<'a, T, I>(inputs: I) -> DigestOut
#[inline]
#[must_use]
#[tracing::instrument(skip(input), level = "trace")]
#[allow(clippy::unnecessary_fallible_conversions)]
pub fn hash<T>(input: T) -> DigestOut
where
T: AsRef<[u8]>,
+16 -10
View File
@@ -61,17 +61,23 @@ pub fn format(ts: SystemTime, str: &str) -> String {
pub fn pretty(d: Duration) -> String {
use Unit::*;
let fmt = |w, f, u| format!("{w}.{f} {u}");
let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u);
let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u);
let fmt = |w, u| {
if w == 1 {
format!("{w} {u}")
} else {
format!("{w} {u}s")
}
};
let gen64 = |w, u| fmt(w, u);
let gen128 = |w, u| gen64(u64::try_from(w).expect("u128 to u64"), u);
match whole_and_frac(d) {
| (Days(whole), frac) => gen64(whole, frac, "days"),
| (Hours(whole), frac) => gen64(whole, frac, "hours"),
| (Mins(whole), frac) => gen64(whole, frac, "minutes"),
| (Secs(whole), frac) => gen64(whole, frac, "seconds"),
| (Millis(whole), frac) => gen128(whole, frac, "milliseconds"),
| (Micros(whole), frac) => gen128(whole, frac, "microseconds"),
| (Nanos(whole), frac) => gen128(whole, frac, "nanoseconds"),
| (Days(whole), _) => gen64(whole, "day"),
| (Hours(whole), _) => gen64(whole, "hour"),
| (Mins(whole), _) => gen64(whole, "minute"),
| (Secs(whole), _) => gen64(whole, "second"),
| (Millis(whole), _) => gen128(whole, "millisecond"),
| (Micros(whole), _) => gen128(whole, "microsecond"),
| (Nanos(whole), _) => gen128(whole, "nanosecond"),
}
}
+1
View File
@@ -58,6 +58,7 @@ conduwuit-core.workspace = true
conduwuit-macros.workspace = true
const-str.workspace = true
ctor.workspace = true
dtor.workspace = true
futures.workspace = true
log.workspace = true
minicbor.workspace = true
+8 -2
View File
@@ -288,8 +288,14 @@ fn deserialize_option<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
}
#[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))]
fn deserialize_bool<V: Visitor<'de>>(self, _visitor: V) -> Result<V::Value> {
unhandled!("deserialize bool not implemented")
fn deserialize_bool<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
let byte = self
.buf
.get(self.pos)
.ok_or(Self::Error::SerdeDe("bool buffer underflow".into()))?;
self.inc_pos(1);
visitor.visit_bool(*byte != 0x00)
}
#[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))]
+1 -1
View File
@@ -29,7 +29,7 @@ fn descriptor_cf_options(
set_table_options(&mut opts, &desc, cache)?;
opts.set_min_write_buffer_number(1);
opts.set_max_write_buffer_number(2);
opts.set_max_write_buffer_number(3);
opts.set_write_buffer_size(desc.write_size);
opts.set_target_file_size_base(desc.file_size);
+30 -1
View File
@@ -49,6 +49,10 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "bannedroomids",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "clientid_clientmetadata",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "disabledroomids",
..descriptor::RANDOM_SMALL
@@ -120,6 +124,10 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "onetimekeyid_onetimekeys",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "fallbackkeyid_fallbackkey",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "passwordresettoken_info",
..descriptor::RANDOM_SMALL
@@ -153,6 +161,10 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "referencedevents",
..descriptor::RANDOM
},
Descriptor {
name: "refreshtoken_refreshtokeninfo",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "registrationtoken_info",
..descriptor::RANDOM_SMALL
@@ -307,6 +319,11 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
key_size_hint: Some(48),
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "rejectedeventids",
key_size_hint: Some(48),
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "statehash_shortstatehash",
val_size_hint: Some(8),
@@ -362,6 +379,14 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "userdevicetxnid_response",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userdeviceid_oauthsessioninfo",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userdeviceid_tokenexpires",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userfilterid_filter",
..descriptor::RANDOM_SMALL
@@ -372,7 +397,7 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
},
Descriptor {
name: "userid_blurhash",
..descriptor::RANDOM_SMALL
..descriptor::DROPPED
},
Descriptor {
name: "userid_dehydrateddevice",
@@ -466,4 +491,8 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "userroomid_invitesender",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "websessionid_session",
..descriptor::RANDOM_SMALL
},
];
+2 -2
View File
@@ -297,8 +297,8 @@ fn serialize_u16(self, _v: u16) -> Result<Self::Ok> {
fn serialize_u8(self, v: u8) -> Result<Self::Ok> { self.write(&[v]) }
fn serialize_bool(self, _v: bool) -> Result<Self::Ok> {
unhandled!("serialize bool not implemented")
fn serialize_bool(self, v: bool) -> Result<Self::Ok> {
if v { self.write(&[0x01]) } else { self.write(&[0x00]) }
}
fn serialize_unit(self) -> Result<Self::Ok> { unhandled!("serialize unit not implemented") }
+2 -2
View File
@@ -32,11 +32,11 @@ mod __compile_introspection {
const CRATE_NAME: &str = #crate_name;
/// Register this crate's features with the global registry during static initialization
#[::ctor::ctor]
#[::ctor::ctor(unsafe)]
fn register() {
conduwuit_core::info::introspection::ENABLED_FEATURES.lock().unwrap().insert(#crate_name, &ENABLED);
}
#[::ctor::dtor]
#[::dtor::dtor(unsafe)]
fn unregister() {
conduwuit_core::info::introspection::ENABLED_FEATURES.lock().unwrap().remove(#crate_name);
}
+1 -8
View File
@@ -47,7 +47,6 @@ default = [
"bindgen-runtime", # replace with bindgen-static on alpine
]
standard = [
"blurhashing",
"brotli_compression",
"element_hacks",
"gzip_compression",
@@ -55,7 +54,6 @@ standard = [
"jemalloc",
"jemalloc_conf",
"journald",
"ldap",
"media_thumbnail",
"systemd",
"url_preview",
@@ -72,9 +70,6 @@ full = [
"tokio_console",
]
blurhashing = [
"conduwuit-service/blurhashing",
]
brotli_compression = [
"conduwuit-api/brotli_compression",
"conduwuit-core/brotli_compression",
@@ -126,9 +121,6 @@ jemalloc_stats = [
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
]
ldap = [
"conduwuit-api/ldap",
]
media_thumbnail = [
"conduwuit-service/media_thumbnail",
]
@@ -217,6 +209,7 @@ conduwuit-macros.workspace = true
clap.workspace = true
ctor.workspace = true
dtor.workspace = true
console-subscriber.optional = true
console-subscriber.workspace = true
const-str.workspace = true
+1 -1
View File
@@ -15,7 +15,7 @@
#[clap(
about,
long_about = None,
name = conduwuit_core::name(),
name = conduwuit_core::BRANDING,
version = conduwuit_core::version(),
)]
pub struct Args {
+1 -1
View File
@@ -110,7 +110,7 @@ pub(crate) fn init(
.with_batch_exporter(exporter)
.build();
let tracer = provider.tracer(conduwuit_core::name());
let tracer = provider.tracer(conduwuit_core::BRANDING);
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
+1 -1
View File
@@ -47,7 +47,7 @@ fn options(config: &Config) -> ClientOptions {
traces_sample_rate: config.sentry_traces_sample_rate,
debug: cfg!(debug_assertions),
release: release_name(),
user_agent: conduwuit_core::version::user_agent().into(),
user_agent: conduwuit_core::user_agent().into(),
attach_stacktrace: config.sentry_attach_stacktrace,
before_send: Some(Arc::new(before_send)),
before_breadcrumb: Some(Arc::new(before_breadcrumb)),
+1
View File
@@ -105,6 +105,7 @@ conduwuit-service.workspace = true
conduwuit-web.workspace = true
const-str.workspace = true
ctor.workspace = true
dtor.workspace = true
futures.workspace = true
http.workspace = true
http-body-util.workspace = true
+7 -5
View File
@@ -8,7 +8,7 @@
extract::State,
response::{IntoResponse, Response},
};
use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace};
use conduwuit::{Result, debug_warn, err, error, info, trace};
use conduwuit_service::Services;
use futures::FutureExt;
use http::{Method, StatusCode, Uri};
@@ -102,17 +102,19 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result<Respons
let reason = status.canonical_reason().unwrap_or("Unknown Reason");
if status.is_server_error() {
error!(%method, %uri, "{code} {reason}");
info!(%method, %uri, "{code} {reason}");
} else if status.is_client_error() {
debug_error!(%method, %uri, "{code} {reason}");
info!(%method, %uri, "{code} {reason}");
} else if status.is_redirection() {
debug!(%method, %uri, "{code} {reason}");
trace!(%method, %uri, "{code} {reason}");
} else {
trace!(%method, %uri, "{code} {reason}");
}
if status == StatusCode::METHOD_NOT_ALLOWED {
return Ok(err!(Request(Unrecognized("Method Not Allowed"))).into_response());
return Ok(
err!(Request(Unrecognized("Method not allowed"), METHOD_NOT_ALLOWED)).into_response()
);
}
Ok(result)

Some files were not shown because too many files have changed in this diff Show More