Compare commits

...

178 Commits

Author SHA1 Message Date
Renovate Bot 91e8c0ea71 chore(deps): update https://github.com/softprops/action-gh-release action to v3 2026-05-01 17:50:20 +00:00
Renovate Bot d963b89a07 chore(deps): update rust-zerover-patch-updates 2026-05-01 17:40:13 +00:00
Ginger 680c972b44 chore: News fragment 2026-05-01 13:17:00 -04:00
Ginger 88b59eb053 fix: Include target user's membership when building stripped state 2026-05-01 13:15:55 -04:00
timedout 4a99de0d28 fix: Store incoming federated invite membership events correctly
Co-Authored-By: Ginger <ginger@gingershaped.computer>
Reviewed-By: Ginger <ginger@gingershaped.computer>
2026-05-01 14:49:27 +01:00
Renovate Bot 0e1f0683c6 chore(deps): update pre-commit hook crate-ci/typos to v1.46.0 2026-05-01 05:04:01 +00:00
Renovate Bot cec4abc7cd chore(deps): update ruma digest to 5742fec 2026-04-30 05:05:10 +00:00
Ginger e6cae5b8ed fix: Fix membership check in kick handler 2026-04-29 12:45:15 -04:00
Ginger 02ccf64d2e fix: Properly create room summary stripped state 2026-04-29 12:44:57 -04:00
Renovate Bot 4d4d875231 chore(deps): update node-patch-updates to v2.0.10 2026-04-29 14:14:58 +00:00
Renovate Bot cdf05b9a8b chore(deps): update rust crate serde-saphyr to 0.0.25 2026-04-29 14:14:17 +00:00
Ginger 9491be928d fix: Fix panic when creating rooms 2026-04-29 09:26:13 -04:00
Ginger 049babc7ca fix: Fix appservice authentication 2026-04-29 09:09:09 -04:00
Ginger 7b99757337 chore: Update news fragment 2026-04-28 09:16:57 -04:00
Ginger d09de005e3 refactor: Drop unused MSC3843 endpoint definition 2026-04-28 09:16:57 -04:00
Ginger e34fd76dc0 fix: Re-add support for MSC4293 2026-04-28 09:16:57 -04:00
Ginger 72dfe579ec docs: Remove ruwuma mention from development.md 2026-04-28 09:16:57 -04:00
Ginger cfae9a34f4 fix: Panic on PL content deserialization failures 2026-04-28 09:16:57 -04:00
Ginger 0a4808ea79 fix: Add stable routes for admin suspension endpoints 2026-04-28 09:16:57 -04:00
Ginger a9a18fc5f0 fix: Re-add support for custom room IDs 2026-04-28 09:16:57 -04:00
Ginger c1434c7935 refactor: Remove mystery initial state hack 2026-04-28 09:16:57 -04:00
Ginger 2e98ba3ed8 fix: Increase max length for report reasons 2026-04-28 09:16:57 -04:00
Ginger 551cf48642 fix: Add bounds checking for profile data 2026-04-28 09:16:57 -04:00
Ginger d256a1c1fa fix: Add bounds checking for profile data 2026-04-28 09:16:57 -04:00
Ginger 5578144da9 refactor: Clean up api/client/membership/kick.rs 2026-04-28 09:16:57 -04:00
Ginger 5309a064e8 refactor: Remove old project name in api/client/membership/ 2026-04-28 09:16:57 -04:00
Ginger 56d35b4e39 refactor: Clean up api/client/membership/ban.rs 2026-04-28 09:16:57 -04:00
Ginger 7375d1cad4 fix: Check existing key equality when uploading new E2EE keys 2026-04-28 09:16:57 -04:00
Ginger 80baf948ae refactor: Switch back to upstream Ruma 2026-04-28 09:16:57 -04:00
Ginger ed37696cef fix: Enable compatibility feature to fix federation with old conduit-family homeservers 2026-04-28 09:16:57 -04:00
Ginger 0a04c60f31 refactor: Improve summary service logging 2026-04-28 09:16:57 -04:00
Ginger e44ac230a6 fix: Fix failing test 2026-04-28 09:16:57 -04:00
Ginger 57c4567380 chore: Changelog 2026-04-28 09:16:57 -04:00
Ginger a8a8e1ea51 chore: Clippy fixes 2026-04-28 09:16:57 -04:00
Ginger 02f69a7160 fix: Fix code that was causing rustc to panic somehow 2026-04-28 09:16:56 -04:00
Ginger f68205a341 refactor: Remove pointless assert 2026-04-28 09:16:56 -04:00
Ginger 9899632b8b chore: Clippy fixes 2026-04-28 09:16:56 -04:00
Ginger a0524a9566 refactor: Fix errors in api/client/directory.rs, again 2026-04-28 09:16:56 -04:00
Ginger e70004c98f chore: Clippy fixes 2026-04-28 09:16:56 -04:00
Ginger e185f56f3a refactor: Fix errors in admin/processor.rs 2026-04-28 09:16:56 -04:00
Ginger 5058b7979a refactor: Fix errors in admin/user/ 2026-04-28 09:16:56 -04:00
Ginger 7f06a61242 refactor: Fix errors in admin/room/ 2026-04-28 09:16:56 -04:00
Ginger 54fefb421b refactor: Fix errors in admin/query/ 2026-04-28 09:16:56 -04:00
Ginger 9d39321deb refactor: Fix errors in admin/media/commands.rs 2026-04-28 09:16:56 -04:00
Ginger c64a4a71bc refactor: Resolve errors in admin/federation/commands.rs 2026-04-28 09:16:56 -04:00
Ginger 385b4b10d1 refactor: Fix errors in admin/debug/commands.rs 2026-04-28 09:16:56 -04:00
Ginger c12dd20431 refactor: Fix errors in admin/check/commands.rs 2026-04-28 09:16:56 -04:00
Ginger 3ad7c3b30d refactor: Fix remaining errors in api/ (and temporarily switch to a fork of ruma) 2026-04-28 09:16:56 -04:00
Ginger 7a58074a0d refactor: Fix errors in web/ 2026-04-28 09:16:56 -04:00
Ginger 0c7abd792d refactor: Fix errors in api/router/ 2026-04-28 09:16:56 -04:00
Ginger 0f64e6b49c refactor: Fix errors in api/server/well_known.rs 2026-04-28 09:16:56 -04:00
Ginger e7a1c71a25 refactor: Fix errors in api/server/version.rs 2026-04-28 09:16:56 -04:00
Ginger cd3b97ea26 refactor: Fix errors in api/server/user.rs 2026-04-28 09:16:56 -04:00
Ginger 845b731f8c refactor: Fix errors in api/server/state.rs 2026-04-28 09:16:56 -04:00
Ginger 97d2388717 refactor: Fix errors in api/server/state_ids.rs 2026-04-28 09:16:56 -04:00
Ginger 962a4aedc6 refactor: Fix errors in api/server/send.rs 2026-04-28 09:16:56 -04:00
Ginger 0eee63f7a1 refactor: Fix errors in api/server/send_leave.rs 2026-04-28 09:16:52 -04:00
Ginger eba38c2fa0 refactor: Fix errors in api/server/send_knock.rs 2026-04-28 09:16:52 -04:00
Ginger 338cdc2a75 refactor: Fix errors in api/server/send_join.rs 2026-04-28 09:16:52 -04:00
Ginger 2dacb8e071 refactor: Fix errors in api/server/query.rs 2026-04-28 09:16:52 -04:00
Ginger 398f73b690 refactor: Fix errors in api/server/publicrooms.rs 2026-04-28 09:16:52 -04:00
Ginger 78d9c29a05 refactor: Fix errors in api/server/media.rs 2026-04-28 09:16:52 -04:00
Ginger 0406f755c2 refactor: Fix errors in api/server/make_leave.rs 2026-04-28 09:16:52 -04:00
Ginger 1827888f09 refactor: Fix errors in api/server/make_knock.rs 2026-04-28 09:16:52 -04:00
Ginger 8871b1f74b refactor: Fix most errors in api/server/make_join.rs 2026-04-28 09:16:52 -04:00
Ginger c7489fd008 refactor: Fix errors in api/server/key.rs 2026-04-28 09:16:52 -04:00
Ginger 7f5f4df64e refactor: Fix errors in api/server/invite.rs 2026-04-28 09:16:52 -04:00
Ginger 15d87c00bf refactor: Fix errors in api/server/get_missing_events.rs 2026-04-28 09:16:52 -04:00
Ginger 7cae42634e refactor: Fix errors in api/server/event.rs 2026-04-28 09:16:52 -04:00
Ginger bd94ec4033 refactor: Fix errors in api/server/event_auth.rs 2026-04-28 09:16:52 -04:00
Ginger db7d378a2e refactor: Fix errors in api/server/backfill.rs 2026-04-28 09:16:52 -04:00
Ginger 39b2e461be refactor: Fix remaining errors in api/client/message.rs 2026-04-28 09:16:52 -04:00
Ginger ca358438ee refactor: Fix mystery weirdness in api/client/sync/v3/mod.rs 2026-04-28 09:16:52 -04:00
Ginger 4282d60181 refactor: Fix errors in api/client/well_known.rs 2026-04-28 09:16:52 -04:00
Ginger 10dbea72e8 refactor: Fix errors in api/client/voip.rs 2026-04-28 09:16:52 -04:00
Ginger aa7c2ea1ad refactor: Fix errors in api/client/user_directory.rs 2026-04-28 09:16:52 -04:00
Ginger 698d959407 refactor: Fix errors in api/client/unversioned.rs 2026-04-28 09:16:52 -04:00
Ginger 4c831c3531 refactor: Fix errors in api/client/typing.rs 2026-04-28 09:16:52 -04:00
Ginger 4dfdce303f refactor: Fix errors in api/client/to_device.rs 2026-04-28 09:16:52 -04:00
Ginger 8d8c310a64 refactor: Fix errors in api/client/threads.rs 2026-04-28 09:16:52 -04:00
Ginger e50e24e22d refactor: Fix errors in api/client/thirdparty.rs 2026-04-28 09:16:52 -04:00
Ginger a215b63077 refactor: Fix errors in api/client/tag.rs 2026-04-28 09:16:52 -04:00
Ginger 1d39210a0c refactor: Fix errors in api/client/state.rs 2026-04-28 09:16:52 -04:00
Ginger 360e0dada8 refactor: Fix errors in api/client/session.rs 2026-04-28 09:16:52 -04:00
Ginger cbf24a9483 refactor: Fix errors in api/client/send.rs 2026-04-28 09:16:52 -04:00
Ginger 6cb3f909c9 refactor: Fix errors in api/client/search.rs 2026-04-28 09:16:52 -04:00
Ginger b7c9ef89f0 refactor: Fix errors in api/client/report.rs 2026-04-28 09:16:52 -04:00
Ginger 64f7791ddb refactor: Fix errors in api/client/relations.rs 2026-04-28 09:16:52 -04:00
Ginger 836047b54e refactor: Fix errors in api/client/redact.rs 2026-04-28 09:16:52 -04:00
Ginger 256f8f679d refactor: Fix errors in api/client/read_marker.rs 2026-04-28 09:16:52 -04:00
Ginger 154cda35f3 refactor: Fix errors in api/client/push.rs 2026-04-28 09:16:52 -04:00
Ginger 1bf6d2a117 refactor: Fix errors in api/client/profile.rs and api/client/unstable.rs 2026-04-28 09:16:52 -04:00
Ginger 69d33931fa refactor: Fix errors in api/client/presence.rs 2026-04-28 09:16:52 -04:00
Ginger 83902a584b refactor: Fix errors in api/client/openid.rs 2026-04-28 09:16:52 -04:00
Ginger bcff259875 refactor: Fix most errors in api/client/messages.rs 2026-04-28 09:16:52 -04:00
Ginger 496ca80393 refactor: Fix errors in api/client/media.rs 2026-04-28 09:16:52 -04:00
Ginger 34b992fc40 refactor: Fix errors in api/client/media_legacy.rs
Sent from my Steam Deck
2026-04-28 09:16:52 -04:00
Ginger 1ea9330df8 refactor: Fix errors in api/client/keys.rs 2026-04-28 09:16:52 -04:00
Ginger 267e1c5d65 refactor: Fix errors in api/client/directory.rs 2026-04-28 09:16:52 -04:00
Ginger 36285e7784 refactor: Fix errors in api/client/device.rs 2026-04-28 09:16:52 -04:00
Ginger 53ab20d1cd refactor: Fix errors in api/client/dehydrated_device.rs 2026-04-28 09:16:52 -04:00
Ginger 96adf034e6 refactor: Fix errors in api/client/context.rs 2026-04-28 09:16:51 -04:00
Ginger a75bf32a34 refactor: Fix errors in api/client/capabilities.rs 2026-04-28 09:16:51 -04:00
Ginger c89ecd7b63 refactor: Fix errors in api/client/backup.rs 2026-04-28 09:16:51 -04:00
Ginger 7f30f8419b refactor: Fix errors in api/client/appservice.rs 2026-04-28 09:16:51 -04:00
Ginger 0a81f4d629 refactor: Fix errors in api/client/account_data.rs 2026-04-28 09:16:51 -04:00
Ginger 4e456249ac refactor: Fix errors in api/client/sync 2026-04-28 09:16:51 -04:00
Ginger 01e403f05f refactor: Resolve remaining errors in threepid.rs 2026-04-28 09:16:51 -04:00
Ginger a2f6141f4b refactor: Fix errors in api/client/room/ 2026-04-28 09:16:51 -04:00
Ginger 97a01a1500 refactor: Rename PduBuilder to PartialPdu 2026-04-28 09:16:51 -04:00
Ginger bf9c9716eb refactor: Add function to state_accessor to get create event 2026-04-28 09:16:51 -04:00
Ginger 471eb54c66 refactor: Consolidate hierarchy and summary logic in a new service 2026-04-28 09:16:51 -04:00
Ginger 755006c66d refactor: Fix errors in api/client/membership/ 2026-04-28 09:16:51 -04:00
Ginger ccd6072f2d refactor: Fix (most) errors in api/client/account/ 2026-04-28 09:16:51 -04:00
Ginger 24f7e1d658 chore: Clippy fixes 2026-04-28 09:16:51 -04:00
Ginger d62eeda130 refactor: Replace more uses of RoomVersionId with RoomVersionRules 2026-04-28 09:16:51 -04:00
Ginger 3e1f97487f fix: Resolve errors in recently added services 2026-04-28 09:16:51 -04:00
Jade Ellis a4e64383b7 refactor: Ruma upstreaming, bake a little more 2026-04-28 09:16:51 -04:00
Ginger 204bc1367e refactor: Ruma upstreaming, half-baked edition
Co-authored-by: Jade Ellis <jade@ellis.link>
2026-04-28 09:16:51 -04:00
Jade Ellis 1cc9dbf2a4 chore: Update lockfile 2026-04-28 09:27:00 +01:00
Renovate Bot 2cf28baf03 chore(deps): update pre-commit hook crate-ci/typos to v1.45.2 2026-04-28 05:03:35 +00:00
timedout f3fb218652 style: Clippy conflicts with cargo fmt, apparently 2026-04-27 22:15:52 +00:00
timedout 0924b7d27e style: Use debug assert instead of a normal assert 2026-04-27 22:15:52 +00:00
timedout 8575f191a0 style: Simplify build_local_dag return 2026-04-27 22:15:52 +00:00
timedout fe7cfd96e7 feat: Assert that no events were dropped during sorting 2026-04-27 22:15:52 +00:00
timedout 8b0e86a05d fix: Don't consider out-of-scope nodes as prev events before sorting incoming events 2026-04-27 22:15:52 +00:00
Jade Ellis 8b8fef998c fix(deps): Enable rustls roots on old rustls 2026-04-27 22:51:21 +01:00
Jade Ellis decd6083a0 fix(deps): Enable a TLS backend for outdated reqwest 2026-04-27 13:10:47 +01:00
Renovate Bot 06184d8c9f chore(deps): update https://github.com/taiki-e/install-action digest to 787505c 2026-04-25 12:21:19 +01:00
Renovate Bot 7c20e22b75 chore(deps): pin https://github.com/dorny/paths-filter action to fbd0ab8 2026-04-25 11:19:09 +00:00
Jade Ellis 3f862b58cb ci: Fix unstable builds for repo packages 2026-04-25 11:33:25 +01:00
Jade Ellis 046a6356f3 ci: Automatically upload release binaries 2026-04-25 11:17:43 +01:00
Jade Ellis 3af0240ff5 style: Fix clippy lint 2026-04-25 10:07:17 +01:00
ginger 5dcfff51cf chore: Admin announcement 2026-04-24 20:33:07 +00:00
Ginger b9989f1713 chore: Release 2026-04-24 15:21:47 -04:00
Ginger 1d3e3e7e62 chore: Update changelog 2026-04-24 15:21:40 -04:00
Jade Ellis 0adf3aa956 fix: Revert 7b1aabda9f
Yeah that didn't work sadly.
2026-04-24 16:22:46 +01:00
Jade Ellis 7b1aabda9f feat: Re-enable http3
This required the previous commit, and relies on
the included flag to make fat LTO builds
work correctly.
2026-04-24 14:51:11 +01:00
Jade Ellis e31c5997b7 fix: Explicitly set TLS backends
Dependency updates mean we have to set a custom TLS backend sooner.
Also some groundwork for being able to use aws-lc in future
2026-04-24 14:19:12 +01:00
Jade Ellis 7ca0d137c4 chore: Replace ring for sha256 with sha2 2026-04-24 12:56:05 +01:00
Jade Ellis 0344bf71d8 chore: Disable http3 by default
Unfortunately h3 requires aws-lc since the last version of reqwest.
aws-lc currently breaks the build.
2026-04-24 12:11:48 +01:00
Jade Ellis a07d3e24ea fix(deps): correct aws-lc-rs dependencies and direct-tls 2026-04-24 10:06:47 +01:00
Jade Ellis 1bc7950748 fix: Update direct-tls server handle 2026-04-24 09:34:16 +01:00
tokii 0fd43ff6fa docs: Update Nomad deployment docs to include volume configuration changes 2026-04-23 20:07:53 +00:00
tokii 796136f1a6 docs: Update Nomad deployment docs for HTTPS and Traefik changes 2026-04-23 20:07:53 +00:00
tokii 447608985b docs: Add deployment documentation for Nomad 2026-04-23 20:07:53 +00:00
timedout 5f4cd47d88 fix: Add workaround for handling malformed PDUs
Signed-off-by: timedout <git@nexy7574.co.uk>
Reviewed-On: https://forgejo.ellis.link/continuwuation/continuwuity-sec/pulls/7
Reviewed-By: Jade Ellis <jade@ellis.link>
2026-04-23 20:48:11 +01:00
stratself a7244bdb68 docs(docker): Detailed port exposure docs for other reverse proxies 2026-04-23 19:47:46 +00:00
stratself 91f2900463 docs(docker): More compose cleanups
* Stringify and use long URLs for image names
* Use read-only docker socket in traefik mount
* Shorten some comments
2026-04-23 19:47:46 +00:00
stratself e44ae3bac9 docs(delegation): Add compose examples
Previous projects used split-domain examples, so it's good to add back
2026-04-23 19:47:46 +00:00
stratself b692f9e6e7 fix(docs): Fix one wrong config filename and title all the composes 2026-04-23 19:47:46 +00:00
stratself 695333fe5b chore: Renumber changelog PR and fix trailing whitespace 2026-04-23 19:47:31 +00:00
stratself bc7a6c148f fix(docs): Small wording fixes 2026-04-23 19:47:31 +00:00
ky-bean bd3944573b docs(docker): Add note for required config setting 2026-04-23 19:47:31 +00:00
ky-bean 21ac3c5a86 chore: Add news fragment for #1553 2026-04-23 19:47:31 +00:00
ky-bean 3976849b97 docs(docker): fix typos (psuedo=>pseudo, decleration=>declaration) 2026-04-23 19:47:31 +00:00
ky-bean a1e3619291 docs(docker): update wording, implement suggestions from @lveneris 2026-04-23 19:47:31 +00:00
ky-bean a92fc78a90 docs(docker): Detail how to access the server's console 2026-04-23 19:47:31 +00:00
ky-bean fc429ea564 docs: explain admin console for docker deployments 2026-04-23 19:47:31 +00:00
stratself 69c931e18a docs(generic): Highlight important /_continuwuity features + typofixes 2026-04-23 19:46:57 +00:00
stratself 284e0ce1e5 chore: Add changelog for #1677 2026-04-23 19:46:57 +00:00
stratself a13779a051 docs(generic): Remove Nix build section and further wording fixes
* Add spec link to well-known support endpoint
* Prioritize simpler "route everything" approach for other RProxies
2026-04-23 19:46:57 +00:00
stratself 7163714697 docs(generic): Fix links for CI binaries 2026-04-23 19:46:57 +00:00
stratself 3998a14c32 docs(generic): Rewrite sections on server initialization and testing
* Rename "You're done" to "Starting Your Server"
* Add instructions for initial registration token flow
* Shorten "How do I know it works" section
* Beautify "What's Next" section
2026-04-23 19:46:57 +00:00
stratself c79f2a3057 docs(generic): Fix router + reverse proxy + docker build sections
* Link docker builds to section in dev pages
* Delete old section on port forwarding
* Create new section on port exposing, near reverse proxy section
* Rewrite Other Reverse Proxies section to update specified routes
* Move reverse proxy software caveats into its own subsection
* Other wording and structure fixes and improvements
2026-04-23 19:46:57 +00:00
stratself 17837c51a0 docs(generic): Various fixes for consistency with other pages
* Use indirect URLs
* Change your.server.name to canonical example.com
* Put Getting help section in a tip admonition
* Remove statement on Caddy preference
* Clean up "What's next" section
2026-04-23 19:46:57 +00:00
stratself 99a7be0222 fix(docs): Correct ports for LiveKit TURN range 2026-04-23 19:46:16 +00:00
stratself 41ed2eb167 docs(livekit): Add/correct client discovery paths and extra fixes
* Use de facto unstable path instead of the standard one
* Document both the unstable and well-known paths as being exposed by
  Continuwuity when set
* Make curling said path one of the Testing steps
* Remove confusing console `~$` prefix symbols, move commands and
  responses into separate code blocks
* Number the MSCs
2026-04-23 19:46:16 +00:00
Renovate Bot 2b08460b16 chore(deps): update https://github.com/actions/setup-node digest to 48b55a0 2026-04-23 19:45:52 +00:00
Renovate Bot 4cf8f6e05b chore(deps): update https://github.com/taiki-e/install-action digest to 74e87cb 2026-04-23 19:45:46 +00:00
Jade Ellis ae37acb228 ci: Don't run clippy & tests if rust files haven't changed 2026-04-23 20:44:57 +01:00
Jade Ellis 10c3045f5f ci: Run Renovate on resolvematrix 2026-04-23 20:44:56 +01:00
Jade Ellis 8242718571 ci: Label PRs touching dependencies 2026-04-23 20:44:56 +01:00
Renovate Bot 03db067aab chore(deps): update ghcr.io/renovatebot/renovate docker tag to v43.140.0 2026-04-23 19:23:09 +00:00
Jade Ellis b28ddde1eb chore: Update lockfile 2026-04-23 20:04:05 +01:00
Jade Ellis 0134f69bf9 chore: Update incompatible dependencies 2026-04-23 20:02:48 +01:00
Jade Ellis 15878371bf chore: Update reqwest 2026-04-23 18:53:25 +01:00
Getz Mikalsen 980bd475b6 feat: Add TLS options for LDAP (#1389)
Optional StartTLS for LDAP and add option to skip TLS verification.

Co-authored-by: Jade Ellis <jade@ellis.link>
Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1389
Reviewed-by: Jade Ellis <jade@ellis.link>
2026-04-23 17:39:25 +00:00
300 changed files with 7953 additions and 8080 deletions
+1 -1
@@ -71,7 +71,7 @@ runs:
   - name: Install timelord-cli and git-warp-time
     if: steps.check-binaries.outputs.need-install == 'true'
-    uses: https://github.com/taiki-e/install-action@a2352fc6ce487f030a3aa709482d57823eadfb37 # v2
+    uses: https://github.com/taiki-e/install-action@787505cde8a44ea468a00478fe52baf23b15bccd # v2
     with:
       tool: git-warp-time,timelord-cli@3.0.1
+3
@@ -37,6 +37,9 @@
       if (file.startsWith('pkg/') || file.startsWith('nix/') || file === 'flake.nix' || file === 'flake.lock' || file.startsWith('docker/')) {
         labelsToAdd.add('Meta/Packaging');
       }
+      if (file === 'Cargo.lock') {
+        labelsToAdd.add('Dependencies');
+      }
     }
     if (labelsToAdd.size > 0) {
+1 -1
@@ -96,7 +96,7 @@ jobs:
           if [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9]$ ]]; then
             # Use the "stable" component for tagged semver releases
             COMPONENT="stable"
-          elif [[ ${{ forge.ref }} =~ ^refs/tags/^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
+          elif [[ ${{ forge.ref_name }} =~ ^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
             # Use the "unstable" component for tagged semver pre-releases
             COMPONENT="unstable"
           else
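The fix matters because `forge.ref` carries the full `refs/tags/...` prefix (and the old pattern had a stray inner `^`), so the anchored match could never succeed; `forge.ref_name` is the bare tag. A quick local sketch of the same pattern logic, using hypothetical ref names:

```bash
# Hypothetical ref names; exercises the workflow's (pre-existing) patterns.
for ref_name in v0.5.8 v0.5.8-rc.1 main; do
  if [[ $ref_name =~ ^v+[0-9]\.+[0-9]\.+[0-9]$ ]]; then
    echo "$ref_name -> stable"
  elif [[ $ref_name =~ ^v+[0-9]\.+[0-9]\.+[0-9] ]]; then
    echo "$ref_name -> unstable"
  else
    echo "$ref_name -> neither"
  fi
done
# v0.5.8 -> stable, v0.5.8-rc.1 -> unstable, main -> neither
```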
+6 -2
@@ -105,7 +105,7 @@ jobs:
       RELEASE_SUFFIX=""
       TAG_NAME="${{ github.ref_name }}"
       # Extract version from tag (remove v prefix if present)
-      TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
+      TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//' | tr '-' '~')

       # Create spec file with tag version
       sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
@@ -270,9 +270,13 @@
       # Determine the group based on ref type and branch
       if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
-        GROUP="stable"
         # For tags, extract the tag name for version info
         TAG_NAME="${{ github.ref_name }}"
+        if [[ "$TAG_NAME" == *"-"* ]]; then
+          GROUP="unstable"
+        else
+          GROUP="stable"
+        fi
       elif [ "${{ github.ref_name }}" = "main" ]; then
         GROUP="dev"
       else
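A sketch of what the two changed branches now produce, with hypothetical tag names (the `tr '-' '~'` step suits RPM versioning, where `~` sorts before any release):

```bash
# Hypothetical tag names; mirrors the workflow's new version/group logic.
for TAG_NAME in v0.5.8 v0.5.8-rc.1; do
  TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//' | tr '-' '~')
  if [[ "$TAG_NAME" == *"-"* ]]; then GROUP="unstable"; else GROUP="stable"; fi
  echo "$TAG_NAME -> version=$TAG_VERSION group=$GROUP"
done
# v0.5.8      -> version=0.5.8      group=stable
# v0.5.8-rc.1 -> version=0.5.8~rc.1 group=unstable
```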
+1 -1
@@ -32,7 +32,7 @@ jobs:
   - name: Setup Node.js
     if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
-    uses: https://github.com/actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
+    uses: https://github.com/actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6
     with:
       node-version: 22
+1 -1
@@ -24,7 +24,7 @@ jobs:
   steps:
     - name: 📦 Setup Node.js
-      uses: https://github.com/actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
+      uses: https://github.com/actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6
       with:
         node-version: "22"
+23
@@ -9,6 +9,7 @@ on:

 permissions:
   contents: read
+  pull-requests: read

 jobs:
   fast-checks:
@@ -40,10 +41,32 @@ jobs:
           cargo +nightly fmt --all -- --check && \
           echo "✅ Formatting check passed" || \
           exit 1

+  check-changes:
+    name: Check changed files
+    runs-on: ubuntu-latest
+    outputs:
+      rust: ${{ steps.filter.outputs.rust }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
+        with:
+          persist-credentials: false
+      - name: Check for file changes
+        uses: https://github.com/dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4
+        id: filter
+        with:
+          filters: |
+            rust:
+              - '**/*.rs'
+              - '**/Cargo.toml'
+              - '**/Cargo.lock'
+
   clippy-and-tests:
     name: Clippy and Cargo Tests
     runs-on: ubuntu-latest
+    needs: check-changes
+    if: needs.check-changes.outputs.rust == 'true'
     steps:
       - name: Checkout repository
+22
@@ -199,6 +199,28 @@ jobs:
       registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
       registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

+  release-binaries:
+    name: "Release Binaries"
+    runs-on: ubuntu-latest
+    needs:
+      - build-release
+      - build-maxperf
+    permissions:
+      contents: write
+    if: startsWith(github.ref, 'refs/tags/')
+    steps:
+      - name: Download binary artifacts
+        uses: forgejo/download-artifact@v4
+        with:
+          pattern: conduwuit*
+          path: binaries
+          merge-multiple: true
+      - name: Create Release and Upload
+        uses: https://github.com/softprops/action-gh-release@b4309332981a82ec1c5618f44dd2e27cc8bfbfda # v3
+        with:
+          draft: true
+          files: binaries/*
+
   mirror_images:
     name: "Mirror Images"
     runs-on: ubuntu-latest
+3 -3
@@ -43,7 +43,7 @@ jobs:
     name: Renovate
     runs-on: ubuntu-latest
     container:
-      image: ghcr.io/renovatebot/renovate:43.111.0@sha256:da5fcac20c48d9792aac9c61fd234531bfa8df61263a39387cd8920263ca4768
+      image: ghcr.io/renovatebot/renovate:43.140.0@sha256:61303c28b10a491c559529fb6f41745850e4755a43a54c04c3ae6848d6eaf5cc
       options: --tmpfs /tmp:exec
     steps:
       - name: Checkout
@@ -90,12 +90,12 @@ jobs:
       RENOVATE_PLATFORM: forgejo
       RENOVATE_ENDPOINT: ${{ github.server_url }}
       RENOVATE_AUTODISCOVER: 'false'
-      RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
+      RENOVATE_REPOSITORIES: '["${{ github.repository }}", "continuwuation/resolvematrix"]'
       RENOVATE_GIT_TIMEOUT: 60000
       RENOVATE_REQUIRE_CONFIG: 'required'
-      RENOVATE_ONBOARDING: 'false'
+      # RENOVATE_ONBOARDING: 'false'
       RENOVATE_INHERIT_CONFIG: 'true'
       RENOVATE_GITHUB_TOKEN_WARN: 'false'
+1 -1
@@ -24,7 +24,7 @@ repos:
       - id: check-added-large-files
   - repo: https://github.com/crate-ci/typos
-    rev: v1.45.1
+    rev: v1.46.0
    hooks:
      - id: typos
+17
@@ -1,3 +1,20 @@
+# Continuwuity 0.5.8 (2026-04-24)
+
+## Features
+
+- LDAP can now optionally be connected to using StartTLS, and you may unsafely skip verification. Contributed by @getz (#1389)
+- Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
+
+## Bugfixes
+
+- Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
+
+## Improved Documentation
+
+- Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera. (#1622)
+- Improve instructions for generic deployments, removing unnecessary parts and documenting the new initial registration token flow. Contributed by @stratself (#1677)
+
 # Continuwuity v0.5.7 (2026-04-17)

 ## Features
Generated
+423 -381
File diff suppressed because it is too large.
+46 -42
@@ -12,7 +12,7 @@ license = "Apache-2.0"
 # See also `rust-toolchain.toml`
 readme = "README.md"
 repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
-version = "0.5.7"
+version = "0.5.8"

 [workspace.metadata.crane]
 name = "conduwuit"
@@ -36,7 +36,7 @@ version = "0.3"
 features = ["ffi", "std", "union"]

 [workspace.dependencies.const-str]
-version = "0.7.0"
+version = "1.1.0"

 [workspace.dependencies.ctor]
 version = "0.10.0"
@@ -47,9 +47,9 @@ default-features = false
 features = ["features"]

 [workspace.dependencies.toml]
-version = "0.9.5"
+version = "1.1.2"
 default-features = false
-features = ["parse"]
+features = ["parse", "serde"]

 [workspace.dependencies.sanitize-filename]
 version = "0.6.0"
@@ -68,7 +68,7 @@ default-features = false
 version = "0.1.3"

 [workspace.dependencies.rand]
-version = "0.10.0"
+version = "0.10.1"

 # Used for the http request / response body type for Ruma endpoints used with reqwest
 [workspace.dependencies.bytes]
@@ -102,15 +102,18 @@ default-features = false
 features = ["typed-header", "tracing", "cookie"]

 [workspace.dependencies.axum-server]
-version = "0.7.2"
+version = "0.8.0"
 default-features = false

 # to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest
 [workspace.dependencies.axum-server-dual-protocol]
-version = "0.7"
+# version = "0.7"
+git = "https://github.com/vinchona/axum-server-dual-protocol.git"
+rev = "ca6db055254255b74238673ce4135698e347d71c" # feat!: bump axum_server to 0.8.0
 default-features = false

 [workspace.dependencies.axum-client-ip]
-version = "0.7"
+version = "1.3"

 [workspace.dependencies.tower]
 version = "0.5.2"
@@ -134,13 +137,12 @@ features = [
 [workspace.dependencies.rustls]
 version = "0.23.25"
 default-features = false
-features = ["aws_lc_rs"]

 [workspace.dependencies.reqwest]
-version = "0.12.15"
+version = "0.13.2"
 default-features = false
 features = [
-    "rustls-tls-native-roots",
+    "rustls-no-provider",
     "socks",
     "hickory-dns",
     "http2",
@@ -159,7 +161,7 @@ features = ["raw_value"]

 # Used for appservice registration files
 [workspace.dependencies.serde-saphyr]
-version = "0.0.23"
+version = "0.0.25"

 # Used to load forbidden room/user regex from config
 [workspace.dependencies.serde_regex]
@@ -167,7 +169,7 @@ version = "1.1.0"

 # Used for ruma wrapper
 [workspace.dependencies.serde_html_form]
-version = "0.2.6"
+version = "0.4.0"

 # Used for password hashing
 [workspace.dependencies.argon2]
@@ -251,7 +253,7 @@ features = [
 ]

 [workspace.dependencies.tokio-metrics]
-version = "0.4.0"
+version = "0.5.0"

 [workspace.dependencies.libloading]
 version = "0.9.0"
@@ -340,51 +342,50 @@ version = "0.1.88"
 [workspace.dependencies.lru-cache]
 version = "0.1.2"

-[workspace.dependencies.assign]
-version = "1.1.1"
-
 # Used for matrix spec type definitions and helpers
 [workspace.dependencies.ruma]
-git = "https://forgejo.ellis.link/continuwuation/ruwuma"
-#branch = "conduwuit-changes"
-rev = "d00b51a8669b21689c4eb47fb81f3a8b27c3e371"
+# version = "0.14.1"
+git = "https://github.com/ruma/ruma.git"
+rev = "5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
 features = [
-    "compat",
-    "rand",
     "appservice-api-c",
     "client-api",
     "federation-api",
-    "markdown",
     "push-gateway-api-c",
-    "unstable-exhaustive-types",
+    "state-res",
+    "rand",
+    "markdown",
     "ring-compat",
     "compat-upload-signatures",
-    "identifiers-validation",
-    "unstable-unspecified",
+    "compat-optional-txn-pdus",
     "unstable-msc2448",
     "unstable-msc2666",
     "unstable-msc2867",
     "unstable-msc2870",
-    "unstable-msc3026",
     "unstable-msc3061",
     "unstable-msc3814",
     "unstable-msc3245",
-    "unstable-msc3266",
-    "unstable-msc3381", # polls
-    "unstable-msc3489", # beacon / live location
-    "unstable-msc3575",
-    "unstable-msc3930", # polls push rules
+    "unstable-msc3381",
+    "unstable-msc3489",
+    "unstable-msc3930",
     "unstable-msc4075",
     "unstable-msc4095",
     "unstable-msc4121",
     "unstable-msc4125",
+    "unstable-msc4155",
     "unstable-msc4186",
-    "unstable-msc4203", # sending to-device events to appservices
-    "unstable-msc4210", # remove legacy mentions
+    "unstable-msc4195",
+    "unstable-msc4203",
+    "unstable-msc4310",
+    "unstable-msc4373",
+    "unstable-msc4380",
+    "unstable-msc4143",
+    "unstable-msc4293",
+    "unstable-msc4406",
+    "unstable-msc4439",
     "unstable-extensible-events",
-    "unstable-pdu",
-    "unstable-msc4155",
-    "unstable-msc4143", # livekit well_known response
-    "unstable-msc4284",
-    "unstable-msc4439", # pgp_key in .well_known/matrix/support
 ]

 [workspace.dependencies.rust-rocksdb]
@@ -429,14 +430,13 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
 # optional sentry metrics for crash/panic reporting
 [workspace.dependencies.sentry]
-version = "0.46.0"
+version = "0.47.0"
 default-features = false
 features = [
     "backtrace",
     "contexts",
     "debug-images",
     "panic",
-    "rustls",
     "tower",
     "tower-http",
     "tracing",
@@ -445,9 +445,9 @@ features = [
 ]

 [workspace.dependencies.sentry-tracing]
-version = "0.46.0"
+version = "0.47.0"
 [workspace.dependencies.sentry-tower]
-version = "0.46.0"
+version = "0.47.0"

 # jemalloc usage
 [workspace.dependencies.tikv-jemalloc-sys]
@@ -560,7 +560,7 @@ version = "0.15.0"
 [workspace.dependencies.lettre]
 version = "0.11.19"
 default-features = false
-features = ["smtp-transport", "pool", "hostname", "builder", "rustls", "rustls-native-certs", "tokio1", "ring", "tokio1-rustls", "tracing", "serde"]
+features = ["smtp-transport", "pool", "hostname", "builder", "rustls", "rustls-native-certs", "tokio1", "rustls-no-provider", "tokio1-rustls", "tracing", "serde"]

 [workspace.dependencies.governor]
 version = "0.10.4"
@@ -657,6 +657,10 @@ default-features = false
 package = "conduwuit"
 path = "src/main"

+[workspace.dependencies.ruminuwuity]
+package = "ruminuwuity"
+path = "src/ruminuwuity"
+
 ###############################################################################
 #
 # Release profiles
-1
@@ -1 +0,0 @@
-Users will now be prevented from removing their email if the server is configured to require an email when registering an account.
+1
@@ -0,0 +1 @@
+The invite recipient's membership event is now included in invite stripped state, which should fix flaky invite display in some clients. Contributed by @ginger
+1
@@ -0,0 +1 @@
+Switched from Continuwuity's fork of Ruma back to upstream Ruma. Contributed by @ginger.
-1
@@ -1 +0,0 @@
-Fixed a situation where multiple email addresses could be associated with one user when that user changes their email address.
-1
@@ -1 +0,0 @@
-Updated config docs to state we support room version 12, and set it as default. Contributed by @ezera.
+1
@@ -0,0 +1 @@
+Explain accessing Continuwuity's server console when deployed via Docker.
+1
@@ -0,0 +1 @@
+Fixed a bug that caused the server to drop events during processing if several events for the same room were sent in a singular transaction. Contributed by @nex.
+8 -12
@@ -573,18 +573,6 @@
 #
 #allow_public_room_directory_over_federation = false

-# Allow guests/unauthenticated users to access TURN credentials.
-#
-# This is the equivalent of Synapse's `turn_allow_guests` config option.
-# This allows any unauthenticated user to call the endpoint
-# `/_matrix/client/v3/voip/turnServer`.
-#
-# It is unlikely you need to enable this as all major clients support
-# authentication for this endpoint and prevents misuse of your TURN server
-# from potential bots.
-#
-#turn_allow_guests = false
-
 # Set this to true to lock down your server's public room directory and
 # only allow admins to publish rooms to the room directory. Unpublishing
 # is still allowed by all users with this enabled.
@@ -1966,6 +1954,14 @@
 #
 #uri = ""

+# StartTLS for LDAP connections.
+#
+#use_starttls = false
+
+# Skip TLS certificate verification, possibly dangerous.
+#
+#disable_tls_verification = false
+
 # Root of the searches.
 #
 # example: "ou=users,dc=example,dc=org"
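A minimal sketch of how the two new options might be set, matching the feature commit above (hypothetical values; the option names come from the diff itself, while the `[global.ldap]` section name and `uri` value are assumptions):

```toml
# Hypothetical values; option names are from the config diff above.
[global.ldap]
uri = "ldap://ldap.example.com:389"
# Upgrade the plaintext LDAP connection to TLS (new in 0.5.8)
use_starttls = true
# Leave certificate verification enabled unless you trust the network
disable_tls_verification = false
```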
+3 -1
@@ -17,12 +17,14 @@ ARG LLVM_VERSION=21
 # Line one: compiler tools
 # Line two: curl, for downloading binaries and wget because llvm.sh is broken with curl
 # Line three: for xx-verify
+# golang, cmake: For aws-lc-rs bindgen
 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt-get update && apt-get install -y \
         pkg-config make jq \
         wget curl git software-properties-common \
         file
+        # golang cmake

 # LLVM packages
 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
@@ -162,7 +164,7 @@ ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
 ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA

 ARG RUST_PROFILE=release
-ARG CARGO_FEATURES="default,http3"
+ARG CARGO_FEATURES="default"

 # Build the binary
 RUN --mount=type=cache,target=/usr/local/cargo/registry \
+50 -5
@@ -50,8 +50,6 @@
 # Defaults to members of the admin room if unset
 # CONTINUWUITY_WELL_KNOWN__SERVER: matrix.example.com:443
 ```

-## Reverse proxying well-known files to Continuwuity
-
 After doing the steps above, Continuwuity will serve these 3 JSON files:

 - `/.well-known/matrix/client`: for Client-Server discovery
@@ -60,9 +58,11 @@
 To enable full discovery, you will need to reverse proxy these paths from the base domain back to Continuwuity.

+## Reverse proxying well-known files to Continuwuity
+
 <details>
-<summary>For Caddy</summary>
+<summary>For **Caddy**</summary>

 ```
 matrix.example.com:443 {
@@ -78,7 +78,7 @@
 <details>
-<summary>For Traefik (via Docker labels)</summary>
+<summary>For **Traefik** (via Docker labels)</summary>

 ```
 services:
@@ -93,7 +93,10 @@
 </details>

-Restart Continuwuity and your reverse proxy. Once that's done, visit these routes and check that the responses match the examples below:
+For **Docker** users, consult the compose files in the [Appendix section](#docker-compose-examples).
+
+After applying these changes, restart Continuwuity and your reverse proxy. Visit these routes and check that the responses match the examples below:

 <details open>
@@ -253,3 +256,45 @@
 - [Server-to-Server resolution](https://spec.matrix.org/v1.17/server-server-api/#resolving-server-names) (see this for more information on SRV records)
 - [Client-to-Server resolution](https://spec.matrix.org/v1.17/client-server-api/#server-discovery)
 - [MSC1929: Homeserver Admin Contact and Support page](https://github.com/matrix-org/matrix-spec-proposals/pull/1929)
+
+## Appendix
+
+### Docker Compose examples
+
+The following Compose files are taken from [Docker instructions](../deploying/docker.mdx) and reconfigured to support split-domain delegation. Note the updated `CONTINUWUITY_WELL_KNOWN` variable and relevant changes in reverse proxy rules.
+
+<details>
+<summary>Caddy (using Caddyfile) - delegated.docker-compose.with-caddy.yml ([view raw](/advanced/delegated.docker-compose.with-caddy.yml))</summary>
+
+```yaml file="../public/advanced/delegated.docker-compose.with-caddy.yml"
+```
+
+</details>
+
+<details>
+<summary>Caddy (using labels) - delegated.docker-compose.with-caddy-labels.yml ([view raw](/advanced/delegated.docker-compose.with-caddy-labels.yml))</summary>
+
+```yaml file="../public/advanced/delegated.docker-compose.with-caddy-labels.yml"
+```
+
+</details>
+
+<details>
+<summary>Traefik (for existing setup) - delegated.docker-compose.for-traefik.yml ([view raw](/advanced/delegated.docker-compose.for-traefik.yml))</summary>
+
+```yaml file="../public/advanced/delegated.docker-compose.for-traefik.yml"
+```
+
+</details>
+
+<details>
+<summary>Traefik included - delegated.docker-compose.with-traefik.yml ([view raw](/advanced/delegated.docker-compose.with-traefik.yml))</summary>
+
+```yaml file="../public/advanced/delegated.docker-compose.with-traefik.yml"
+```
+
+</details>
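Not part of the diff, but a quick way to verify the delegation this page describes once deployed, assuming `example.com` stands in for your base domain:

```bash
# Hypothetical domain; fetches the three well-known files the doc lists.
for f in client server support; do
  echo "== $f =="
  curl -s "https://example.com/.well-known/matrix/$f"
done
```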
+36 -8
@@ -91,7 +91,7 @@ ### 3. Telling clients where to find LiveKit
 To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to the `[global.matrix_rtc]` config section using the `foci` option.

-The variable should be a list of servers serving as MatrixRTC endpoints. Clients discover these via the `/_matrix/client/v1/rtc/transports` endpoint (MSC4143).
+The variable should be a list of servers serving as MatrixRTC endpoints. Replace the URL with the address you are deploying your instance of lk-jwt-service to:

 ```toml
 [global.matrix_rtc]
@@ -100,7 +100,10 @@ ### 3. Telling clients where to find LiveKit
 ]
 ```

-Remember to replace the URL with the address you are deploying your instance of lk-jwt-service to.
+This will expose LiveKit information on the following endpoints for clients to discover:
+
+- `/_matrix/client/unstable/org.matrix.msc4143/rtc/transports` (MSC4143 unstable, behind auth)
+- `/.well-known/matrix/client` (fallback, not behind auth. Only enabled if `[global.well_known].client` is set)

 ### 4. Configure your Reverse Proxy
@@ -114,6 +117,7 @@ ### 4. Configure your Reverse Proxy
 <details>
 <summary>Example caddy config</summary>
+
 ```
 livekit.example.com {
@@ -127,10 +131,12 @@ ### 4. Configure your Reverse Proxy
     reverse_proxy 127.0.0.1:7880
 }
 ```
+
 </details>

 <details>
 <summary>Example nginx config</summary>
+
 ```
 server {
     server_name livekit.example.com;
@@ -167,16 +173,19 @@ ### 4. Configure your Reverse Proxy
         '' close;
     }
 ```
+
 </details>

 <details>
 <summary>Example traefik router</summary>
+
 ```
 # on LiveKit itself
 traefik.http.routers.livekit.rule=Host(`livekit.example.com`)
+
 # on the JWT service
 traefik.http.routers.livekit-jwt.rule=Host(`livekit.example.com`) && (PathPrefix(`/sfu/get`) || PathPrefix(`/healthz`) || PathPrefix(`/get_token`))
 ```
+
 </details>
@@ -210,7 +219,7 @@ ### add these to livekit's docker-compose ###
 ### if you're using `network_mode: host`, you can skip this part
 ```

-Recreate the LiveKit container (with `docker-compose up -d livekit`) to apply these changes. Remember to allow the new `3478/udp` and `50100:50200/udp` ports through your firewall.
+Recreate the LiveKit container (with `docker-compose up -d livekit`) to apply these changes. Remember to allow the new `3478/udp` and `50300:50400/udp` ports through your firewall.

 ### Integration with an external TURN server
@@ -257,11 +266,25 @@ ## Testing
 First, you will need an access token for your current login session. These can be found in your client's settings or obtained via [this website](https://timedout.uk/mxtoken.html).

-Then, using that token, request another OpenID token for use with the lk-jwt-service:
+Then, using that token, fetch the discovery endpoints for MatrixRTC services

 ```bash
-~$ curl -X POST -H "Authorization: Bearer <session-access-token>" \
+curl -X POST -H "Authorization: Bearer <session-access-token>" \
+  https://matrix.example.com/_matrix/client/unstable/org.matrix.msc4143/rtc/transports
+```
+
+In the output, you should see the LiveKit URL matching the one [configured above](#3-telling-clients-where-to-find-livekit).
+
+With the same token, request another OpenID token for use with the lk-jwt-service:
+
+```bash
+curl -X POST -H "Authorization: Bearer <session-access-token>" \
   https://matrix.example.com/_matrix/client/v3/user/@user:example.com/openid/request_token
+```
+
+You will see a response as below:

+```json
 {"access_token":"<openid_access_token>","token_type":"Bearer","matrix_server_name":"example.com","expires_in":3600}
 ```
@@ -296,10 +319,15 @@ ## Testing
 ```bash
 ~$ curl -X POST -d @payload.json https://livekit.example.com/get_token
+```
+
+The lk-jwt-service will, after checking against Continuwuity, answer with a `jwt` token to create a LiveKit media room:

+```json
 {"url":"wss://livekit.example.com","jwt":"a_really_really_long_string"}
 ```

-The lk-jwt-service will, after checking against Continuwuity, answer with a `jwt` token to create a LiveKit media room. Use this token to test at the [LiveKit Connection Tester](https://livekit.io/connection-test). If everything works there, then you have set up LiveKit successfully!
+Use this token to test at the [LiveKit Connection Tester](https://livekit.io/connection-test). If everything works there, then you have set up LiveKit successfully!

 ## Troubleshooting
@@ -363,8 +391,8 @@ ## Related Documentation
 Specifications:

-- [MatrixRTC proposal](https://github.com/matrix-org/matrix-spec-proposals/pull/4143)
-- [LiveKit proposal](https://github.com/matrix-org/matrix-spec-proposals/pull/4195)
+- [MSC4143 - MatrixRTC proposal](https://github.com/matrix-org/matrix-spec-proposals/pull/4143)
+- [MSC4195 - LiveKit proposal](https://github.com/matrix-org/matrix-spec-proposals/pull/4195)

 Source code:
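As a supplement to the Testing changes above, the unauthenticated well-known fallback can be inspected the same way (hypothetical domain; the `org.matrix.msc4143.rtc_foci` key name is an assumption based on MSC4143, not taken from this diff):

```bash
# Hypothetical domain; inspects the well-known fallback for RTC foci.
curl -s https://example.com/.well-known/matrix/client \
  | jq '."org.matrix.msc4143.rtc_foci"'
```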
+5
@@ -34,6 +34,11 @@
     "name": "kubernetes",
     "label": "Kubernetes"
   },
+  {
+    "type": "file",
+    "name": "nomad",
+    "label": "Nomad"
+  },
   {
     "type": "file",
     "name": "freebsd",
+24 -3
@@ -148,7 +148,7 @@ #### For other reverse proxies
 </details>

-You will then need to point your reverse proxy towards Continuwuity at `127.0.0.1:8008`. See the [Other reverse proxies](generic.mdx#setting-up-the-reverse-proxy) section of the Generic page for further routing details.
+See the [Other reverse proxies](generic.mdx#setting-up-the-reverse-proxy) section of the Generic page for further routing details.

 ### Starting Your Server
@@ -243,9 +243,30 @@ ### (Optional) Building Custom Images
 [Building Docker Images](../development/index.mdx#building-docker-images)
 section in the development documentation.

+### Accessing the Server's Console
+
+Before you can access the server's console and [send admin commands](../reference/admin/index.md) from the CLI, you will need to make the container interactive and allocate a pseudo-tty. Make sure you set `admin_console_automatic` to `true` in [the config](../reference/config.mdx) as well for Continuwuity to activate the CLI on startup.
+
+For Docker Compose deployments this means adding `stdin_open: true` and `tty: true` to the container's declaration:
+
+```yaml
+services:
+  homeserver:
+    stdin_open: true
+    tty: true
+    # ...
+```
+
+If you choose to deploy via `docker run`, add the flags `-i`/`--interactive` and `-t`/`--tty` to the command.
+
+From there you can access the server's console by running `docker attach <container-name>`, which will show the server's prompt `uwu> `. To exit `docker attach`, press `CTRL+p` then `CTRL+q`.
+
+Note that using `CTRL+c` within `docker attach`'s context will forward the signal to the server, stopping it. See [Docker's reference][docker-attach-reference] for more information.
+
+[docker-attach-reference]: https://docs.docker.com/reference/cli/docker/container/attach/
+
 ## Next steps

 - For smooth federation, set up a caching resolver according to the [**DNS tuning guide**](../advanced/dns.mdx) (recommended)
 - To set up Audio/Video communication, see the [**Calls**](../calls.mdx) page.
-- If you want to set up an appservice, take a look at the [**Appservice
-  Guide**](../appservices.mdx).
+- If you want to set up an appservice, take a look at the [**Appservice Guide**](../appservices.mdx).
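A compact sketch of the `docker run` variant the new section describes (hypothetical container name and volume path; the image URL and flags come from the docs above):

```bash
# Hypothetical names/paths; combines the -i/-t flags with attach/detach.
docker run -d -it --name continuwuity \
  -v /srv/continuwuity:/var/lib/conduwuit -p 8008:8008 \
  forgejo.ellis.link/continuwuation/continuwuity:latest
docker attach continuwuity   # detach with CTRL+p then CTRL+q, not CTRL+c
```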
+105 -105
View File
@@ -1,10 +1,12 @@
# Generic deployment documentation # Generic deployment documentation
> ### Getting help :::tip Getting help
> If you run into any problems while setting up Continuwuity, ask us in
> If you run into any problems while setting up Continuwuity, ask us in `#continuwuity:continuwuity.org` or [open an issue on
> `#continuwuity:continuwuity.org` or [open an issue on Forgejo][forgejo-new-issue].
> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). :::
[forgejo-new-issue]: https://forgejo.ellis.link/continuwuation/continuwuity/issues/new
## Installing Continuwuity ## Installing Continuwuity
@@ -15,17 +17,16 @@ ### Prebuilt binary
Prebuilt binaries are available from: Prebuilt binaries are available from:
- **Tagged releases**: [Latest release page](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) - **Tagged releases**: [see Release page][release-page]
- **Development builds**: CI artifacts from the `main` branch - **Development builds**: CI artifacts from the `main` branch,
(includes Debian/Ubuntu packages) [see `release-image.yml` for details][release-image]
When browsing CI artifacts, `ci-bins` contains binaries organised
by commit hash, while `releases` contains tagged versions. Sort
by last modified date to find the most recent builds.
The binaries require jemalloc and io_uring on the host system. Currently The binaries require jemalloc and io_uring on the host system. Currently
we can't cross-build static binaries - contributions are welcome here. we can't cross-build static binaries - contributions are welcome here.
[release-page]: https://forgejo.ellis.link/continuwuation/continuwuity/releases/
[release-image]: https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=release-image.yml
#### Performance-optimised builds #### Performance-optimised builds
For x86_64 systems with CPUs from the last ~15 years, use the For x86_64 systems with CPUs from the last ~15 years, use the
@@ -38,11 +39,12 @@ #### Performance-optimised builds
If you're using Docker instead, equivalent performance-optimised If you're using Docker instead, equivalent performance-optimised
images are available with the `-maxperf` suffix (e.g. images are available with the `-maxperf` suffix (e.g.
`forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf`). `forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf`).
These images use the `release-max-perf` These images use the `release-max-perf` build profile with
build profile with [link-time optimisation (LTO)][lto-rust-docs]
[link-time optimisation (LTO)](https://doc.rust-lang.org/cargo/reference/profiles.html#lto)
and, for amd64, target the haswell CPU architecture. and, for amd64, target the haswell CPU architecture.
[lto-rust-docs]: https://doc.rust-lang.org/cargo/reference/profiles.html#lto
### Nix ### Nix
Theres a Nix package defined in our flake, available for Linux and MacOS. Add continuwuity as an input to your flake, and use `inputs.continuwuity.packages.${system}.default` to get a working Continuwuity package. Theres a Nix package defined in our flake, available for Linux and MacOS. Add continuwuity as an input to your flake, and use `inputs.continuwuity.packages.${system}.default` to get a working Continuwuity package.
@@ -55,7 +57,8 @@ ### Compiling
#### Using Docker #### Using Docker
If you would like to build using docker, you can run the command `docker build -f ./docker/Dockerfile -t forgejo.ellis.link/continuwuation/continuwuity:main .` to compile continuwuity. See the [Building Docker Images](../development/index.mdx#building-docker-images)
section in the development documentation.
#### Manual #### Manual
@@ -69,7 +72,7 @@ ##### Dependencies
##### Build ##### Build
You can build Continuwuity using `cargo build --release`. You can now build Continuwuity using `cargo build --release`.
Continuwuity supports various optional features that can be enabled during compilation. Please see the Cargo.toml file for a comprehensive list, or ask in our rooms. Continuwuity supports various optional features that can be enabled during compilation. Please see the Cargo.toml file for a comprehensive list, or ask in our rooms.
@@ -91,27 +94,6 @@ ## Adding a Continuwuity user
sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
``` ```
## Setting up a systemd service

You can find an example unit for Continuwuity below.
@@ -123,7 +105,7 @@ ## Setting up a systemd service
`/etc/rsyslog.conf` to allow color in logs.
If you are using a different `database_path` than the systemd unit's
configured default (`/var/lib/conduwuit`), you need to add your path to the
systemd unit's `ReadWritePaths=`. You can do this by either directly editing
`conduwuit.service` and reloading systemd, or by running `systemctl edit conduwuit.service`
and entering the following:
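A minimal sketch of such a drop-in, assuming a custom path of `/srv/conduwuit`:

```bash
# Create a drop-in override without opening an editor
sudo mkdir -p /etc/systemd/system/conduwuit.service.d
printf '[Service]\nReadWritePaths=/srv/conduwuit\n' |
    sudo tee /etc/systemd/system/conduwuit.service.d/override.conf
sudo systemctl daemon-reload
```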
@@ -144,7 +126,9 @@ ### Example systemd Unit File
</details>
You can also [view the file on Forgejo][systemd-file].
[systemd-file]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/pkg/conduwuit.service
## Creating the Continuwuity configuration file
@@ -155,9 +139,7 @@ ## Creating the Continuwuity configuration file
**Please take a moment to read the config. You need to change at least the
server name.**
### Setting the correct file permissions
If you are using a dedicated user for Continuwuity, you need to allow it to
read the configuration. To do this, run:
@@ -175,22 +157,29 @@ ## Setting the correct file permissions
sudo chmod 700 /var/lib/conduwuit/
```
## Exposing ports in the firewall or the router

Matrix's default federation port is **:8448**, and clients use port **:443**. You will need to
expose these ports on your firewall or router. If you use UFW, the commands to allow them
are: `ufw allow 8448/tcp` and `ufw allow 443/tcp`.

:::tip Alternative port/domain setups
If you would like to use only port 443, a different port, or a subdomain for the homeserver, you will need to set up `.well-known` delegation. Consult the `[global.well_known]` section of the config file, and the [**Delegation/Split-domain**](../advanced/delegation) page to learn more about these kinds of deployments.
:::
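If you use firewalld instead of UFW, an equivalent sketch:

```bash
sudo firewall-cmd --permanent --add-port=443/tcp
sudo firewall-cmd --permanent --add-port=8448/tcp
sudo firewall-cmd --reload
```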
## Setting up the Reverse Proxy
### Caddy
Caddy is the recommended reverse proxy as it is easy to use, has good defaults,
and handles TLS certificates automatically. After installing Caddy via your preferred
method, create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter the following
(substitute `example.com` with your actual server name):
```
example.com, example.com:8448 {
# TCP reverse_proxy
reverse_proxy 127.0.0.1:8008
# UNIX socket
#reverse_proxy unix//run/conduwuit/conduwuit.sock
}
```
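After saving the file, a validation and reload sketch (assuming your main Caddyfile imports `/etc/caddy/conf.d/*`, which not every package does by default):

```bash
# Check the configuration parses, then reload the service
caddy validate --config /etc/caddy/Caddyfile
sudo systemctl reload caddy
```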
@@ -202,51 +191,45 @@ ### Caddy
### Other Reverse Proxies
Normally, your reverse proxy should route everything from ports :8448 and :443 back to Continuwuity.
For more granular control, you will need to proxy everything under the following routes:

- `/_matrix/` - core Matrix APIs, which includes:
  - `/_matrix/federation` and `/_matrix/key` - core Server-Server APIs. These should be available on port :8448
  - `/_matrix/client` - core Client-Server APIs. These should be available on port :443
- `/_conduwuit/` and `/_continuwuity/` - ad-hoc Continuwuity routes for password resets, email verification, and server details such as `/local_user_count` and `/server_version`.

You can optionally reverse proxy the following individual routes:

- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
Continuwuity to perform delegation (see the `[global.well_known]` config section)
- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin
[contact and support page][well-known-support]
- `/` and `/_continuwuity/logo.svg` if you would like to see the Continuwuity landing page

Refer to the respective software's documentation and online guides on how to do so.

[well-known-support]: https://spec.matrix.org/v1.18/client-server-api/#getwell-knownmatrixsupport
#### Caveats for specific reverse proxies

- Lighttpd is not supported as it appears to interfere with the `X-Matrix` Authorization
header, making federation non-functional. If you find a workaround, please share it so we can add it to this documentation.
- If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from interfering with the `X-Matrix` header (note that Apache is not ideal as a general reverse proxy, so we discourage using it if alternatives are available).
- If using Nginx, you need to pass the request URI to Continuwuity using `$request_uri`, like this:
  - `proxy_pass http://127.0.0.1:6167$request_uri;`
  - `proxy_pass http://127.0.0.1:6167;`
Furthermore, Nginx users need to increase the `client_max_body_size` setting (default is 1M) to match the `max_request_size` defined in conduwuit.toml.
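As an illustrative Nginx fragment only (this is not an official snippet from this project; the port and size values must match your own configuration):

```bash
# Write an example location block; include this file from your server {} block
cat <<'EOF' | sudo tee /etc/nginx/snippets/continuwuity.conf
location /_matrix/ {
    proxy_pass http://127.0.0.1:6167$request_uri;
    proxy_set_header Host $host;
    # Match max_request_size from conduwuit.toml (20M here is only an example)
    client_max_body_size 20M;
}
EOF
sudo nginx -t && sudo systemctl reload nginx
```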
## Starting Your Server

Now you can start Continuwuity with:
@@ -260,36 +243,53 @@ ## You're done
sudo systemctl enable conduwuit
```
Check Continuwuity logs with the following command:

```bash
sudo journalctl -u conduwuit.service
```

If Continuwuity has successfully initialized, you'll see output as below.

```
In order to use your new homeserver, you need to create its
first user account.

Open your Matrix client of choice and register an account
on example.com using registration token x5keUZ811RqvLsNa .

Pick your own username and password!
```
You can then open [a Matrix client][matrix-clients],
enter your homeserver address, and try to register with the provided token.
By default, the first user is the instance's first admin. They will be added
to the `#admin:example.com` room and be able to [issue admin commands](../reference/admin/index.md).
[matrix-clients]: https://matrix.org/ecosystem/clients
## How do I know it works?
To check if your server can communicate with other homeservers, use the
[Matrix Federation Tester](https://federationtester.mtrnord.blog/). If you can
register your account but cannot join federated rooms, check your configuration
and verify that your federation endpoints are open and forwarded correctly.
As a quick health check, you can also use these cURL commands:
```bash
curl https://example.com/_conduwuit/server_version
# If using port 8448
curl https://example.com:8448/_conduwuit/server_version
# If federation is enabled
curl https://example.com:8448/_matrix/federation/v1/version
# For client-server endpoints
curl https://example.com/_matrix/client/versions
```
## What's next?

- For smooth federation, set up a caching resolver according to the [**DNS tuning guide**](../advanced/dns.mdx) (recommended)
- For Audio/Video call functionality see the [**Calls**](../calls.md) page.
- If you want to set up an appservice, take a look at the [**Appservice Guide**](../appservices.md).
@@ -0,0 +1,118 @@
# Continuwuity for Nomad
You can either pass the configuration as environment variables or mount a file containing the configuration from Consul.
This example configuration assumes that you have a Traefik reverse proxy running.
## Persistence
Since the database is stored as RocksDB files, it is recommended to use a volume to persist the data.
The example below uses CSI volumes; you need to configure the CSI driver on your cluster.
| Volume Name | Mount Path | Purpose |
|-------------|------------|---------|
| continuwuity-volume | `/var/lib/continuwuity` | Store the database |
| continuwuity-media-volume | `/var/lib/continuwuity/media` | Store uploaded media |
## Configuration
### Using environment variables
```hcl
job "continuwuity" {
datacenters = ["dc1"]
type = "service"
node_pool = "default"
group "continuwuity" {
count = 1
network {
port "http" {
static = 6167
}
}
service {
name = "continuwuity"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))",
"traefik.http.routers.continuwuity.entrypoints=https",
"traefik.http.routers.continuwuity.tls=true",
"traefik.http.routers.continuwuity.tls.certresolver=letsencrypt",
"traefik.http.routers.continuwuity-http.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))",
"traefik.http.routers.continuwuity-http.entrypoints=http",
"traefik.http.routers.continuwuity-http.middlewares=continuwuity-redirect",
"traefik.http.middlewares.continuwuity-redirect.redirectscheme.scheme=https",
"traefik.http.middlewares.continuwuity-redirect.redirectscheme.permanent=true",
]
}
volume "continuwuity-volume" {
type = "csi"
read_only = false
source = "continuwuity-volume"
attachment_mode = "file-system"
access_mode = "single-node-writer"
per_alloc = false
}
volume "continuwuity-media-volume" {
type = "csi"
read_only = false
source = "continuwuity-media-volume"
attachment_mode = "file-system"
access_mode = "single-node-writer"
per_alloc = false
mount_options {
mount_flags = []
}
}
task "continuwuity" {
driver = "docker"
env {
CONTINUWUITY_SERVER_NAME = "matrix.example.com"
CONTINUWUITY_TRUSTED_SERVERS = "[\"matrix.org\", \"mozilla.org\"]"
CONTINUWUITY_ALLOW_REGISTRATION = false
CONTINUWUITY_ADDRESS = "0.0.0.0"
CONTINUWUITY_PORT = 6167
CONTINUWUITY_DATABASE_PATH = "/var/lib/continuwuity"
CONTINUWUITY_WELL_KNOWN = <<EOF
{
client=https://matrix.example.com,
server=matrix.example.com:443
}
EOF
}
config {
image = "forgejo.ellis.link/continuwuation/continuwuity:latest"
ports = ["http"]
}
volume_mount {
volume = "continuwuity-volume"
destination = "/var/lib/continuwuity"
}
volume_mount {
volume = "continuwuity-media-volume"
destination = "/var/lib/continuwuity/media"
}
}
}
}
```
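To submit the job (the filename is an example):

```bash
nomad job run continuwuity.nomad.hcl
nomad job status continuwuity
```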
### Using Consul
```hcl
...
template {
data = <<EOF
{{key "config/continuwuity"}}
EOF
destination = "local/conduwuit.toml"
}
...
```
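With this template, the server reads its configuration from the Consul KV store. A sketch for seeding the key (key name as in the template above, config filename is an example):

```bash
# Upload a local conduwuit.toml to the key the template renders from
consul kv put config/continuwuity @conduwuit.toml
```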
@@ -81,8 +81,6 @@ ## List of forked dependencies
All forked dependencies are maintained under the
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):

- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
- [jemallocator][continuwuation-jemallocator] - Fork of
@@ -6,10 +6,10 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here." "message": "Welcome to Continuwuity! Important announcements about the project will appear here."
}, },
{ {
"id": 11, "id": 12,
"mention_room": false, "mention_room": false,
"date": "2026-04-17", "date": "2026-04-24",
"message": "[v0.5.7](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.7) is out! Email verification! Terms and Conditions! Deleting notification pushers! So much good stuff. Go grab the release and read the changelog!" "message": "[v0.5.8](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.8) is out! This is a patch release which fixes a bug in 0.5.7's email support -- upgrade soon if you use that feature."
} }
] ]
} }
@@ -0,0 +1,43 @@
# Continuwuity - Behind Traefik Reverse Proxy
services:
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
- db:/var/lib/continuwuity
- ./continuwuity-resolv.conf:/etc/resolv.conf # use custom resolvers rather than Docker's
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
- "traefik.http.routers.continuwuity.tls=true"
- "traefik.http.routers.continuwuity.service=continuwuity"
- "traefik.http.services.continuwuity.loadbalancer.server.port=8008"
# possibly, depending on your config:
# - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
environment:
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_ADDRESS: 0.0.0.0
CONTINUWUITY_PORT: 8008 # This must match with traefik's loadbalancer label
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
# Serve .well-known files to tell others to reach Continuwuity on port :443
CONTINUWUITY_WELL_KNOWN: |
{
client=https://matrix.example.com,
server=matrix.example.com:443
}
volumes:
db:
networks:
# This must match the network name that Traefik listens on
proxy:
external: true
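To start this stack (assuming the file is saved as `docker-compose.yml` next to `continuwuity-resolv.conf`):

```bash
docker compose up -d
docker compose logs -f homeserver
```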
@@ -0,0 +1,54 @@
# Continuwuity - With Caddy Labels
services:
caddy:
# This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity!
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
image: "docker.io/lucaslorentz/caddy-docker-proxy:ci-alpine"
ports:
- 80:80
- 443:443
environment:
- CADDY_INGRESS_NETWORKS=caddy
networks:
- caddy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/data
restart: unless-stopped
labels:
caddy: example.com
caddy.reverse_proxy: /.well-known/matrix/* homeserver:8008
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
- db:/var/lib/continuwuity
- ./continuwuity-resolv.conf:/etc/resolv.conf # use custom resolvers rather than Docker's
#- ./continuwuity.toml:/etc/continuwuity.toml
environment:
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_ADDRESS: 0.0.0.0
CONTINUWUITY_PORT: 8008
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
# Serve .well-known files to tell others to reach Continuwuity on port :443
CONTINUWUITY_WELL_KNOWN: |
{
client=https://matrix.example.com,
server=matrix.example.com:443
}
networks:
- caddy
labels:
caddy: matrix.example.com
caddy.reverse_proxy: "{{upstreams 8008}}"
volumes:
db:
networks:
caddy:
@@ -0,0 +1,57 @@
# Continuwuity - Using Caddy Docker Image
services:
caddy:
image: "docker.io/caddy:latest"
ports:
- 80:80
- 443:443
networks:
- caddy
volumes:
- ./data:/data
restart: unless-stopped
configs:
- source: Caddyfile
target: /etc/caddy/Caddyfile
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
- db:/var/lib/continuwuity
- ./continuwuity-resolv.conf:/etc/resolv.conf # use custom resolvers rather than Docker's
#- ./continuwuity.toml:/etc/continuwuity.toml
environment:
CONTINUWUITY_SERVER_NAME: example.com
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_ADDRESS: 0.0.0.0
CONTINUWUITY_PORT: 8008
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
## Serve .well-known files to tell others to reach Continuwuity on port :443
CONTINUWUITY_WELL_KNOWN: |
{
client=https://matrix.example.com,
server=matrix.example.com:443
}
networks:
- caddy
networks:
caddy:
volumes:
db:
configs:
Caddyfile:
content: |
https://matrix.example.com:443 {
reverse_proxy http://homeserver:8008
}
https://example.com:443 {
reverse_proxy /.well-known/matrix* http://homeserver:8008
}
@@ -0,0 +1,85 @@
# Continuwuity - With Traefik Reverse Proxy
services:
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
- db:/var/lib/continuwuity
- ./continuwuity-resolv.conf:/etc/resolv.conf # use custom resolvers rather than Docker's
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure"
- "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.services.continuwuity.loadbalancer.server.port=8008"
environment:
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_ADDRESS: 0.0.0.0
CONTINUWUITY_PORT: 8008 # This must match with traefik's loadbalancer label
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
# Serve .well-known files to tell others to reach Continuwuity on port :443
CONTINUWUITY_WELL_KNOWN: |
{
client=https://matrix.example.com,
server=matrix.example.com:443
}
traefik:
image: "docker.io/traefik:latest"
container_name: "traefik"
restart: "unless-stopped"
ports:
- "80:80"
- "443:443"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "acme:/etc/traefik/acme"
labels:
- "traefik.enable=true"
# middleware redirect
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
# global redirect to https
- "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)"
- "traefik.http.routers.redirs.entrypoints=web"
- "traefik.http.routers.redirs.middlewares=redirect-to-https"
environment:
TRAEFIK_LOG_LEVEL: DEBUG
TRAEFIK_ENTRYPOINTS_WEB: true
TRAEFIK_ENTRYPOINTS_WEB_ADDRESS: ":80"
TRAEFIK_ENTRYPOINTS_WEB_HTTP_REDIRECTIONS_ENTRYPOINT_TO: websecure
TRAEFIK_ENTRYPOINTS_WEBSECURE: true
TRAEFIK_ENTRYPOINTS_WEBSECURE_ADDRESS: ":443"
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_TLS_CERTRESOLVER: letsencrypt
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT: true
# CHANGE THIS to desired email for ACME
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_EMAIL: user@example.com
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_HTTPCHALLENGE: true
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_HTTPCHALLENGE_ENTRYPOINT: web
TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_STORAGE: "/etc/traefik/acme/acme.json"
# Since Traefik 3.6.3, paths with certain "encoded characters" are now blocked by default; we need a couple, or else things *will* break
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_ENCODEDCHARACTERS_ALLOWENCODEDSLASH: true
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_ENCODEDCHARACTERS_ALLOWENCODEDHASH: true
TRAEFIK_PROVIDERS_DOCKER: true
TRAEFIK_PROVIDERS_DOCKER_ENDPOINT: "unix:///var/run/docker.sock"
TRAEFIK_PROVIDERS_DOCKER_EXPOSEDBYDEFAULT: false
volumes:
db:
acme:
networks:
proxy:
@@ -2,7 +2,7 @@
services:
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
@@ -38,7 +38,6 @@ volumes:
db:
networks:
# This must match the network name that Traefik listens on
proxy:
external: true
@@ -1,4 +1,4 @@
# Continuwuity - Traefik Reverse Proxy Labels (override file)
services:
homeserver:
@@ -14,13 +14,10 @@ services:
# This must match with CONTINUWUITY_PORT (default: 8008)
- "traefik.http.services.to_continuwuity.loadbalancer.server.port=8008"
# If you want to have your account on <DOMAIN>, but host Continuwuity on a subdomain,
# you can let it only handle the well known file on the base domain instead
#
# - "traefik.http.routers.to-matrix-wellknown.rule=Host(`example.com`) && PathPrefix(`/.well-known/matrix`)"
#- "traefik.http.routers.to-matrix-wellknown.tls=true"
#- "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt"
#- "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker"
@@ -1,8 +1,10 @@
# Continuwuity - With Caddy Labels
services:
caddy:
# This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity!
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
image: "docker.io/lucaslorentz/caddy-docker-proxy:ci-alpine"
ports:
- 80:80
- 443:443
@@ -16,7 +18,7 @@ services:
restart: unless-stopped
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
@@ -1,6 +1,8 @@
# Continuwuity - Using Caddy Docker Image
services:
caddy:
image: "docker.io/caddy:latest"
ports:
- 80:80
- 443:443
@@ -15,7 +17,7 @@ services:
target: /etc/caddy/Caddyfile
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
@@ -37,7 +39,6 @@ services:
# server=example.com:443
# }
networks:
- caddy
@@ -48,8 +49,8 @@ volumes:
db:
configs:
Caddyfile:
content: |
https://example.com:443, https://example.com:8448 {
reverse_proxy http://homeserver:8008
}
@@ -1,8 +1,8 @@
# Continuwuity - With Traefik Reverse Proxy
services:
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
volumes:
@@ -32,14 +32,14 @@ services:
}
traefik:
image: "docker.io/traefik:latest"
container_name: "traefik"
restart: "unless-stopped"
ports:
- "80:80"
- "443:443"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "acme:/etc/traefik/acme"
labels:
- "traefik.enable=true"
@@ -52,6 +52,7 @@ services:
- "traefik.http.routers.redirs.middlewares=redirect-to-https" - "traefik.http.routers.redirs.middlewares=redirect-to-https"
environment: environment:
TRAEFIK_LOG_LEVEL: DEBUG TRAEFIK_LOG_LEVEL: DEBUG
TRAEFIK_ENTRYPOINTS_WEB: true TRAEFIK_ENTRYPOINTS_WEB: true
TRAEFIK_ENTRYPOINTS_WEB_ADDRESS: ":80" TRAEFIK_ENTRYPOINTS_WEB_ADDRESS: ":80"
@@ -1,12 +1,23 @@
# Continuwuity - Bare Configuration (for other reverse proxies)
services:
homeserver:
image: "forgejo.ellis.link/continuwuation/continuwuity:latest"
restart: unless-stopped
command: /sbin/conduwuit
ports:
# If your reverse proxy is on the host, use this
# and configure it to connect to `127.0.0.1:8008`
- 127.0.0.1:8008:8008
# If your reverse proxy is on another machine, use this
# and configure it to connect to <this-machine-ip>:8008
# - 8008:8008
# If your reverse proxy is a docker container on the same network,
# comment out the entire `ports` section, and configure it to connect to `continuwuity:8008`
volumes:
- db:/var/lib/continuwuity
- ./continuwuity-resolv.conf:/etc/resolv.conf # use custom resolvers rather than Docker's
@@ -26,6 +37,5 @@ services:
# server=example.com:443
# }
volumes:
db:
@@ -7,7 +7,7 @@ ## Running commands
* All commands listed here may be used by server administrators in the admin room by sending them as messages.
* If the `admin_escape_commands` configuration option is enabled, server administrators may run certain commands in public rooms by prefixing them with a single backslash. These commands will only run on _their_ homeserver, even if they are a member of another homeserver's admin room. Some sensitive commands cannot be used outside the admin room and will return an error.
* All commands listed here may be used in the server's console, if it is enabled. Commands entered in the console do not require the `!admin` prefix. If Continuwuity is deployed via Docker, be sure to set the appropriate options detailed in [the Docker deployment guide](../../deploying/docker.mdx#accessing-the-servers-console) to enable access to the server's console.
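As a sketch only (the authoritative options live in the linked guide): if the container was started with an interactive TTY, you can reach the console with:

```bash
# Requires the container to run with stdin_open: true and tty: true
# (container name is an example)
docker attach continuwuity
```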
## Categories
@@ -16,26 +16,24 @@
}
},
"node_modules/@emnapi/core": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz",
"integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"@emnapi/wasi-threads": "1.2.1",
"tslib": "^2.4.0"
}
},
"node_modules/@emnapi/runtime": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz",
"integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
@@ -47,7 +45,6 @@
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"optional": true, "optional": true,
"peer": true,
"dependencies": { "dependencies": {
"tslib": "^2.4.0" "tslib": "^2.4.0"
} }
@@ -109,9 +106,9 @@
}
},
"node_modules/@napi-rs/wasm-runtime": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.4.tgz",
"integrity": "sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==",
"dev": true,
"license": "MIT",
"optional": true,
@@ -128,13 +125,13 @@
}
},
"node_modules/@rsbuild/core": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.3.tgz",
"integrity": "sha512-2myp7jUgGen50saxW8OJD/eMVKp7HnuBN5MUzwRb6mDbRZZVpoorfI4LQqiGSBNjGLB6jltvx/R2yHmcmnchwg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/core": "~2.0.1",
"@swc/helpers": "^0.5.21"
},
"bin": {
@@ -153,17 +150,17 @@
}
},
"node_modules/@rsbuild/plugin-react": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-2.0.0.tgz",
"integrity": "sha512-/1gzt39EGUSFEqB83g46QoOwsgv172HI18i6au1b6lgIaX4sv9stuX4ijdHbHCp8PqYEq+MyQ99jIQMO6I+etg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/plugin-react-refresh": "2.0.0",
"react-refresh": "^0.18.0"
},
"peerDependencies": {
"@rsbuild/core": "^2.0.0-0"
},
"peerDependenciesMeta": {
"@rsbuild/core": {
@@ -172,28 +169,28 @@
}
},
"node_modules/@rspack/binding": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.1.tgz",
"integrity": "sha512-ynV1gw4KqFtQ0P+ZZh76SUj49wBb2FuHW3zSmHverHWuxBhzvrZS6/dZ+fCFQG8bTTPtrPz0RQUTN3uEDbPVBQ==",
"dev": true,
"license": "MIT",
"optionalDependencies": {
"@rspack/binding-darwin-arm64": "2.0.1",
"@rspack/binding-darwin-x64": "2.0.1",
"@rspack/binding-linux-arm64-gnu": "2.0.1",
"@rspack/binding-linux-arm64-musl": "2.0.1",
"@rspack/binding-linux-x64-gnu": "2.0.1",
"@rspack/binding-linux-x64-musl": "2.0.1",
"@rspack/binding-wasm32-wasi": "2.0.1",
"@rspack/binding-win32-arm64-msvc": "2.0.1",
"@rspack/binding-win32-ia32-msvc": "2.0.1",
"@rspack/binding-win32-x64-msvc": "2.0.1"
}
},
"node_modules/@rspack/binding-darwin-arm64": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.1.tgz",
"integrity": "sha512-CGFO5zmajD1Itch1lxAI7+gvKiagzyqXopHv/jHG9Su2WWQ2/Nhn2/rkSpdp6ptE9ri6+6tCOOahf099/v/Xog==",
"cpu": [
"arm64"
],
@@ -205,9 +202,9 @@
]
},
"node_modules/@rspack/binding-darwin-x64": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.1.tgz",
"integrity": "sha512-2vvBNBoS09/PurupBwSrlTZd8283o00B8v20ncsNUdEff41uCR/hzIrYoTIVWnVST+Gt5O1+cfcfORp397lajg==",
"cpu": [
"x64"
],
@@ -219,9 +216,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-gnu": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.1.tgz",
"integrity": "sha512-uvNXk6ahE3AH3h2avnd1Mgno68YQpS4cfX1OkOGWIC/roL+NrOP2XVXV4yfVAoydPALDO7AfbIfN0QdmBK3rsA==",
"cpu": [
"arm64"
],
@@ -236,9 +233,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-musl": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.1.tgz",
"integrity": "sha512-S/a6uN9PiZ5O/PjSqyIXhuRC1lVzeJkJV69NeLk5sIEUiDQ/aQGZG97uN+tluwpbo1tPbLJkdHYETfjspOX4Pg==",
"cpu": [
"arm64"
],
@@ -253,9 +250,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-gnu": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.1.tgz",
"integrity": "sha512-C13Kk0OkZiocZVj187Sf753UH6pDXnuEu6vzUvi3qv9ltibG1ki0H2Y8isXBYL2cHQOV+hk0g1S6/4z3TTB97A==",
"cpu": [
"x64"
],
@@ -270,9 +267,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-musl": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.1.tgz",
"integrity": "sha512-TQsiBFpEDGkuvK9tNdGj/Uc+AIytzqhxXH/1jKU6M24cWB1DTw/Cx7DdrkCBDyq3129K3POLdujvbWCGqBzQUw==",
"cpu": [
"x64"
],
@@ -287,9 +284,9 @@
]
},
"node_modules/@rspack/binding-wasm32-wasi": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.1.tgz",
"integrity": "sha512-wk3gyUgBW/ayP49bI54bkY8+EQnfBHxdoe9dz3oobSTZQc8AOWwmUUDEPltW8rUvPOM6dfHECTOUMnfaf2f5yA==",
"cpu": [
"wasm32"
],
@@ -297,13 +294,15 @@
"license": "MIT", "license": "MIT",
"optional": true, "optional": true,
"dependencies": { "dependencies": {
"@napi-rs/wasm-runtime": "1.1.2" "@emnapi/core": "1.10.0",
"@emnapi/runtime": "1.10.0",
"@napi-rs/wasm-runtime": "1.1.4"
} }
}, },
"node_modules/@rspack/binding-win32-arm64-msvc": { "node_modules/@rspack/binding-win32-arm64-msvc": {
"version": "2.0.0-rc.1", "version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.0-rc.1.tgz", "resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.1.tgz",
"integrity": "sha512-+UxF0c7E9bE3siFbMHi+mmoeQJzcTKl1j3x+Y6MY/PJ3V70cU23wOaxMvmSsCyq2JNJBT2RCNZ9HaL+o3kReug==", "integrity": "sha512-rHjLcy3VcAC3+x+PxH+gwhwv6tPe0JdXTNT5eAOs9wgZIM6T9p4wre49+K4Qy98+Fb7TTbLX0ObUitlOkGwTSA==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@@ -315,9 +314,9 @@
]
},
"node_modules/@rspack/binding-win32-ia32-msvc": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.1.tgz",
"integrity": "sha512-Ad1vVqMBBnd4T8rsORngu9sl2kyRTlS4kMlvFudjzl1X2UFArEDBe0YVGNN7ZvahM12CErUx2WiN8Sd8pb+qXQ==",
"cpu": [
"ia32"
],
@@ -329,9 +328,9 @@
]
},
"node_modules/@rspack/binding-win32-x64-msvc": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.1.tgz",
"integrity": "sha512-oPM2Jtm7HOlmxl/aBfleAVlL6t9VeHx6WvEets7BBJMInemFXAQd4CErRqybf7rXutACzLeUWBOue4Jpd1/ykw==",
"cpu": [
"x64"
],
@@ -343,13 +342,13 @@
]
},
"node_modules/@rspack/core": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.1.tgz",
"integrity": "sha512-lgfZiExh8kDR/3obgi3RQKwKG5av1Xf5qDN1aVde777W9pbmx0Pqvrww1qtNvJ+gobEjbrrn5HEZWYGe0VLmcA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/binding": "2.0.1"
},
"engines": {
"node": "^20.19.0 || >=22.12.0"
@@ -368,36 +367,33 @@
}
},
"node_modules/@rspack/plugin-react-refresh": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-2.0.0.tgz",
"integrity": "sha512-Cf6CxBStNDJbiXMc/GmsvG1G8PRlUpa0MSfWsMTI+e8npzuTN/p8nwLs3shriBZOLciqgkSZpBtPTd10BLpj1g==",
"dev": true,
"license": "MIT",
"peerDependencies": {
"@rspack/core": "^2.0.0-0",
"react-refresh": ">=0.10.0 <1.0.0"
},
"peerDependenciesMeta": {
"@rspack/core": {
"optional": true
}
}
},
"node_modules/@rspress/core": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.10.tgz",
"integrity": "sha512-DvoV7YUW538x0CVAGyYPKfjUHgEuq7Z8LZq1cpfUgBpA1DynFUK3Ls6spvdoAHAl3l0AN+xxOHpu/sRVhzqi/A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@mdx-js/mdx": "^3.1.1",
"@mdx-js/react": "^3.1.1",
"@rsbuild/core": "^2.0.2",
"@rsbuild/plugin-react": "~2.0.0",
"@rspress/shared": "2.0.10",
"@shikijs/rehype": "^4.0.2",
"@types/unist": "^3.0.3",
"@unhead/react": "^2.1.13",
@@ -411,8 +407,8 @@
"mdast-util-mdxjs-esm": "^2.0.1", "mdast-util-mdxjs-esm": "^2.0.1",
"medium-zoom": "1.1.0", "medium-zoom": "1.1.0",
"nprogress": "^0.2.0", "nprogress": "^0.2.0",
"react": "^19.2.4", "react": "^19.2.5",
"react-dom": "^19.2.4", "react-dom": "^19.2.5",
"react-lazy-with-preload": "^2.2.1", "react-lazy-with-preload": "^2.2.1",
"react-reconciler": "0.33.0", "react-reconciler": "0.33.0",
"react-render-to-markdown": "19.0.1", "react-render-to-markdown": "19.0.1",
@@ -440,39 +436,39 @@
}
},
"node_modules/@rspress/plugin-client-redirects": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.10.tgz",
"integrity": "sha512-ImOm3h/cbXiJXIvpwv3Wn9rM91xgdhKbD2WX+WlMlWO4AtQfKR4XFrVhIZZAkrt09eeotRIklA7nu8Nuzzzbsw==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^20.19.0 || >=22.12.0"
},
"peerDependencies": {
"@rspress/core": "^2.0.10"
}
},
"node_modules/@rspress/plugin-sitemap": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.10.tgz",
"integrity": "sha512-PZLig9+OlnyLcy6x9BlEqWSRef6TzDWB6Dlh2/hY41FtKlhyb7d7U56RGlLselWaQV54SHVa6H/y611A56ZI2g==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^20.19.0 || >=22.12.0"
},
"peerDependencies": {
"@rspress/core": "^2.0.10"
}
},
"node_modules/@rspress/shared": {
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.10.tgz",
"integrity": "sha512-Kx10OAHWqi2jvW7ScmBUbkGjnwv4E6rEoelUchcL8It8nQ4nAVk0xvvES7m64knEon55zDbs8JQumCjbHu801Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rsbuild/core": "^2.0.2",
"@shikijs/rehype": "^4.0.2",
"unified": "^11.0.5"
}
@@ -972,16 +968,6 @@
"url": "https://github.com/fb55/entities?sponsor=1" "url": "https://github.com/fb55/entities?sponsor=1"
} }
}, },
"node_modules/error-stack-parser": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz",
"integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"stackframe": "^1.3.4"
}
},
"node_modules/esast-util-from-estree": { "node_modules/esast-util-from-estree": {
"version": "2.0.0", "version": "2.0.0",
"resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz",
@@ -3218,13 +3204,6 @@
"url": "https://github.com/sponsors/wooorm" "url": "https://github.com/sponsors/wooorm"
} }
}, },
"node_modules/stackframe": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/stackframe/-/stackframe-1.3.4.tgz",
"integrity": "sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==",
"dev": true,
"license": "MIT"
},
"node_modules/stringify-entities": { "node_modules/stringify-entities": {
"version": "4.0.4", "version": "4.0.4",
"resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
@@ -20,7 +20,11 @@ export default defineConfig({
'/deploying/docker-compose.for-traefik.yml',
'/deploying/docker-compose.with-traefik.yml',
`/deploying/docker-compose.override.yml`,
`/deploying/docker-compose.yml`,
'/advanced/delegated.docker-compose.with-caddy.yml',
'/advanced/delegated.docker-compose.with-caddy-labels.yml',
'/advanced/delegated.docker-compose.for-traefik.yml',
'/advanced/delegated.docker-compose.with-traefik.yml',
]
},
},
@@ -84,6 +84,7 @@ ctor.workspace = true
futures.workspace = true
lettre.workspace = true
log.workspace = true
assign.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde-saphyr.workspace = true
@@ -7,7 +7,7 @@
#[implement(Context, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result {
let timer = tokio::time::Instant::now();
let users = self.services.users.stream().collect::<Vec<_>>().await;
let query_time = timer.elapsed();
let total = users.len();
@@ -79,12 +79,14 @@ pub(super) async fn parse_pdu(&self) -> Result {
}
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
let room_version_rules = RoomVersionId::V12.rules().unwrap();
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json in command body: {e}"),
| Ok(value) => match ruma::signatures::reference_hash(&value, &room_version_rules) {
| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
| Ok(hash) => {
let event_id = EventId::parse(format!("${hash}"));
match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
@@ -119,7 +121,7 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
} else {
"PDU found in our database"
};
write!(self, "{msg}\n```json\n{text}\n```")
},
}
.await
@@ -187,10 +189,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
for event_id in list {
if force {
match self.get_remote_pdu(event_id.clone(), server.clone()).await {
| Err(e) => {
failed_count = failed_count.saturating_add(1);
self.services
@@ -205,7 +204,7 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
},
}
} else {
self.get_remote_pdu(event_id.clone(), server.clone())
.await?;
success_count = success_count.saturating_add(1);
}
@@ -237,10 +236,10 @@ pub(super) async fn get_remote_pdu(
match self
.services
.sending
.send_federation_request(
&server,
ruma::api::federation::event::get_event::v1::Request::new(event_id.clone()),
)
.await
{
| Err(e) => {
@@ -330,9 +329,9 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
match self
.services
.sending
.send_unauthenticated_request(
&server,
ruma::api::federation::discovery::get_server_version::v1::Request::new(),
)
.await
{
@@ -361,7 +360,7 @@ pub(super) async fn force_device_list_updates(&self) -> Result {
self.services
.users
.stream()
.for_each(async |user_id| self.services.users.mark_device_key_update(&user_id).await)
.await;
write!(self, "Marked all devices for all users as having new keys to update").await
@@ -430,9 +429,16 @@ pub(super) async fn verify_json(&self) -> Result {
} }
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
let room_version_rules = RoomVersionId::V12.rules().unwrap();
match serde_json::from_str::<CanonicalJsonObject>(&string) { match serde_json::from_str::<CanonicalJsonObject>(&string) {
| Err(e) => return Err!("Invalid json: {e}"), | Err(e) => return Err!("Invalid json: {e}"),
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await { | Ok(value) => match self
.services
.server_keys
.verify_json(&value, &room_version_rules)
.await
{
| Err(e) => return Err!("Signature verification failed: {e}"), | Err(e) => return Err!("Signature verification failed: {e}"),
| Ok(()) => write!(self, "Signature correct"), | Ok(()) => write!(self, "Signature correct"),
}, },
@@ -445,9 +451,15 @@ pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
use ruma::signatures::Verified; use ruma::signatures::Verified;
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
let room_version_rules = RoomVersionId::V12.rules().unwrap();
event.remove("event_id"); event.remove("event_id");
let msg = match self.services.server_keys.verify_event(&event, None).await { let msg = match self
.services
.server_keys
.verify_event(&event, &room_version_rules)
.await
{
| Err(e) => return Err(e), | Err(e) => return Err(e),
| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
| Ok(Verified::All) => "signatures and hashes OK.", | Ok(Verified::All) => "signatures and hashes OK.",
@@ -544,16 +556,17 @@ pub(super) async fn force_set_room_state_from_server(
}; };
let room_version = self.services.rooms.state.get_room_version(&room_id).await?; let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
let room_version_rules = room_version.rules().unwrap();
let mut state: HashMap<u64, OwnedEventId> = HashMap::new(); let mut state: HashMap<u64, OwnedEventId> = HashMap::new();
let remote_state_response = self let remote_state_response = self
.services .services
.sending .sending
.send_federation_request(&server_name, get_room_state::v1::Request { .send_federation_request(
room_id: room_id.clone(), &server_name,
event_id: at_event_id, get_room_state::v1::Request::new(at_event_id, room_id.clone()),
}) )
.await?; .await?;
for pdu in remote_state_response.pdus.clone() { for pdu in remote_state_response.pdus.clone() {
@@ -576,7 +589,7 @@ pub(super) async fn force_set_room_state_from_server(
for result in remote_state_response.pdus.iter().map(|pdu| { for result in remote_state_response.pdus.iter().map(|pdu| {
self.services self.services
.server_keys .server_keys
.validate_and_add_event_id(pdu, &room_version) .validate_and_add_event_id(pdu, &room_version_rules)
}) { }) {
let Ok((event_id, value)) = result.await else { let Ok((event_id, value)) = result.await else {
continue; continue;
@@ -608,7 +621,7 @@ pub(super) async fn force_set_room_state_from_server(
for result in remote_state_response.auth_chain.iter().map(|pdu| { for result in remote_state_response.auth_chain.iter().map(|pdu| {
self.services self.services
.server_keys .server_keys
.validate_and_add_event_id(pdu, &room_version) .validate_and_add_event_id(pdu, &room_version_rules)
}) { }) {
let Ok((event_id, value)) = result.await else { let Ok((event_id, value)) = result.await else {
continue; continue;
@@ -625,7 +638,7 @@ pub(super) async fn force_set_room_state_from_server(
.services .services
.rooms .rooms
.event_handler .event_handler
.resolve_state(&room_id, &room_version, state) .resolve_state(&room_id, &room_version_rules, state)
.await?; .await?;
info!("Compressing new room state"); info!("Compressing new room state");
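The switch from a hard-coded `&RoomVersionId::V6` to `room_version_rules` matters because the event ID is derived from the reference hash, and the hash/encoding rules vary by room version. A minimal sketch of the idea (not the ruma implementation; it assumes the input is already canonical JSON with `signatures` and `unsigned` stripped, and uses the `sha2` and `base64` crates):

```rust
use base64::Engine;
use sha2::{Digest, Sha256};

/// Toy version of a v4+ reference-hash event ID: SHA-256 over the
/// canonical JSON of the redacted PDU, URL-safe unpadded base64, `$`-prefixed.
fn toy_event_id(canonical_json: &str) -> String {
    let hash = Sha256::digest(canonical_json.as_bytes());
    format!("${}", base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(hash))
}

fn main() {
    println!("{}", toy_event_id(r#"{"type":"m.room.message"}"#));
}
```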
+2 -2
@@ -111,7 +111,7 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
 	.rooms
 	.state_cache
 	.rooms_joined(&user_id)
-	.then(|room_id| get_room_info(self.services, room_id))
+	.then(async |room_id| get_room_info(self.services, &room_id).await)
 	.collect()
 	.await;
@@ -129,6 +129,6 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
 	.collect::<Vec<_>>()
 	.join("\n");
-	self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
+	self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```"))
 		.await
 }
+5 -4
@@ -6,7 +6,8 @@
 	warn,
 };
 use conduwuit_service::media::Dim;
-use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
+use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};
+use service::media::mxc::Mxc;
 use crate::{admin_command, utils::parse_local_user_id};
@@ -261,7 +262,7 @@ pub(super) async fn delete_past_remote_media(
 	)
 	.await?;
-	self.write_str(&format!("Deleted {deleted_count} total files.",))
+	self.write_str(&format!("Deleted {deleted_count} total files."))
 		.await
 }
@@ -271,7 +272,7 @@ pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
 	let deleted_count = self.services.media.delete_from_user(&user_id).await?;
-	self.write_str(&format!("Deleted {deleted_count} total files.",))
+	self.write_str(&format!("Deleted {deleted_count} total files."))
 		.await
 }
@@ -330,7 +331,7 @@ pub(super) async fn delete_all_from_server(
 	}
 }
-	self.write_str(&format!("Deleted {deleted_count} total files.",))
+	self.write_str(&format!("Deleted {deleted_count} total files."))
 		.await
 }
+5 -5
@@ -16,8 +16,8 @@
 use ruma::{
 	EventId,
 	events::{
-		relation::InReplyTo,
-		room::message::{Relation::Reply, RoomMessageEventContent},
+		relation::{InReplyTo, Reply},
+		room::message::{Relation, RoomMessageEventContent},
 	},
 };
 use service::{
@@ -38,6 +38,7 @@ pub(super) fn dispatch(services: Arc<Services>, command: CommandInput) -> Proces
 }
 #[tracing::instrument(skip_all, name = "admin", level = "info")]
+#[allow(clippy::result_large_err)]
 async fn handle_command(services: Arc<Services>, command: CommandInput) -> ProcessorResult {
 	AssertUnwindSafe(Box::pin(process_command(services, &command)))
 		.catch_unwind()
@@ -277,9 +278,8 @@ fn reply(
 	mut content: RoomMessageEventContent,
 	reply_id: Option<&EventId>,
 ) -> RoomMessageEventContent {
-	content.relates_to = reply_id.map(|event_id| Reply {
-		in_reply_to: InReplyTo { event_id: event_id.to_owned() },
-	});
+	content.relates_to =
+		reply_id.map(|event_id| Relation::Reply(Reply::new(InReplyTo::new(event_id.to_owned()))));
 	content
 }
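For context on the `reply` change: `Relation::Reply(Reply::new(InReplyTo::new(event_id)))` builds the same rich-reply relation the old struct literal did, just through constructors. A sketch of the JSON shape this serializes to on the wire (per the Matrix spec; the helper below is hypothetical):

```rust
use serde_json::{Value, json};

/// Hypothetical helper showing the rich-reply `m.relates_to` shape.
fn relates_to(reply_to_event_id: &str) -> Value {
    json!({
        "m.relates_to": {
            "m.in_reply_to": { "event_id": reply_to_event_id }
        }
    })
}

fn main() {
    println!("{}", relates_to("$abc123"));
}
```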
+2 -2
@@ -50,7 +50,7 @@ async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Resu
 	while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await {
 		if let Some(server_name) = server_name.as_ref() {
-			if name != server_name {
+			if name != *server_name {
 				continue;
 			}
 		}
@@ -76,7 +76,7 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result {
 		overrides.next().await
 	{
 		if let Some(server_name) = server_name.as_ref() {
-			if name != server_name {
+			if name != *server_name {
 				continue;
 			}
 		}
+1 -2
@@ -41,7 +41,6 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
 	.rooms
 	.alias
 	.local_aliases_for_room(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -54,7 +53,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>)
 	.rooms
 	.alias
 	.all_local_aliases()
-	.map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned()))
+	.map(|(room_id, alias)| (room_id, alias.to_owned()))
 	.collect::<Vec<_>>()
 	.await;
 let query_time = timer.elapsed();
-8
@@ -101,7 +101,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.room_servers(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -118,7 +117,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.server_rooms(&server)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -135,7 +133,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.room_members(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -152,7 +149,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.local_users_in_room(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -169,7 +165,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.active_local_users_in_room(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -212,7 +207,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.room_useroncejoined(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -229,7 +223,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.room_members_invited(&room_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -276,7 +269,6 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
 	.rooms
 	.state_cache
 	.rooms_joined(&user_id)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
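The recurring deletion of `.map(ToOwned::to_owned)` across these query handlers follows from the service streams now yielding owned IDs rather than borrowed ones, so `collect()` works directly. A minimal before/after sketch, assuming `futures` and `tokio`:

```rust
use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
    // Before: a stream of `&str` items needs an owning map before collect.
    let v1: Vec<String> = stream::iter(["a", "b"])
        .map(ToOwned::to_owned)
        .collect()
        .await;

    // After: a stream that already yields `String` collects directly.
    let v2: Vec<String> = stream::iter(vec!["a".to_owned(), "b".to_owned()])
        .collect()
        .await;

    assert_eq!(v1, v2);
}
```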
+1 -4
@@ -104,7 +104,6 @@ async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Re
 	.rooms
 	.state_cache
 	.get_shared_rooms(&user_a, &user_b)
-	.map(ToOwned::to_owned)
 	.collect()
 	.await;
 let query_time = timer.elapsed();
@@ -217,8 +216,7 @@ async fn iter_users2(&self) -> Result {
 	let result: Vec<_> = self.services.users.stream().collect().await;
 	let result: Vec<_> = result
 		.into_iter()
-		.map(ruma::UserId::as_bytes)
-		.map(String::from_utf8_lossy)
+		.map(|user_id| String::from_utf8_lossy(user_id.as_bytes()).into_owned())
 		.collect();
 	let query_time = timer.elapsed();
@@ -254,7 +252,6 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result {
 	.services
 	.users
 	.all_device_ids(&user_id)
-	.map(ToOwned::to_owned)
 	.collect::<Vec<_>>()
 	.await;
+3 -3
@@ -3,7 +3,7 @@
 use clap::Subcommand;
 use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{OwnedRoomAliasId, OwnedRoomId};
+use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId};
 use crate::Context;
@@ -52,7 +52,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
 	| RoomAliasCommand::Which { ref room_alias_localpart } => {
 		let room_alias_str =
 			format!("#{}:{}", room_alias_localpart, services.globals.server_name());
-		let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
+		let room_alias = match RoomAliasId::parse(room_alias_str) {
 			| Ok(alias) => alias,
 			| Err(err) => {
 				return Err!("Failed to parse alias: {err}");
@@ -139,7 +139,7 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
 	.rooms
 	.alias
 	.all_local_aliases()
-	.map(|(room_id, localpart)| (room_id.into(), localpart.into()))
+	.map(|(room_id, localpart)| (room_id, localpart.into()))
 	.collect::<Vec<(OwnedRoomId, String)>>()
 	.await;
+4 -4
@@ -22,14 +22,14 @@ pub(super) async fn list_rooms(
 	.metadata
 	.iter_ids()
 	.filter_map(|room_id| async move {
-		(!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await)
+		(!exclude_disabled || !self.services.rooms.metadata.is_disabled(&room_id).await)
 			.then_some(room_id)
 	})
 	.filter_map(|room_id| async move {
-		(!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await)
+		(!exclude_banned || !self.services.rooms.metadata.is_banned(&room_id).await)
 			.then_some(room_id)
 	})
-	.then(|room_id| get_room_info(self.services, room_id))
+	.then(async |room_id| get_room_info(self.services, &room_id).await)
 	.then(|(room_id, total_members, name)| async move {
 		let local_members: Vec<_> = self
 			.services
@@ -72,7 +72,7 @@ pub(super) async fn list_rooms(
 	.collect::<Vec<_>>()
 	.join("\n");
-	self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
+	self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len()))
 		.await
 }
+2 -2
@@ -43,7 +43,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
 	.rooms
 	.directory
 	.public_rooms()
-	.then(|room_id| get_room_info(services, room_id))
+	.then(async |room_id| get_room_info(services, &room_id).await)
 	.collect()
 	.await;
@@ -67,7 +67,7 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
 	.join("\n");
 context
-	.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
+	.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```"))
 	.await
 },
 }
+1 -2
@@ -46,7 +46,6 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
 	.then(|| self.services.globals.user_is_local(user_id))
 	.unwrap_or(true)
 })
-.map(ToOwned::to_owned)
 .filter_map(|user_id| async move {
 	Some((
 		self.services
@@ -67,7 +66,7 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
 	.collect::<Vec<_>>()
 	.join("\n");
-	self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
+	self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```"))
 		.await
 }
+10 -14
@@ -71,7 +71,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	debug!("Room specified is a room ID, banning room ID");
-	room_id.to_owned()
+	room_id.clone()
 } else if room.is_room_alias_id() {
 	let room_alias = match RoomAliasId::parse(&room) {
 		| Ok(room_alias) => room_alias,
@@ -89,7 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 		locally, if not using get_alias_helper to fetch room ID remotely"
 	);
-	match self.services.rooms.alias.resolve_alias(room_alias).await {
+	match self.services.rooms.alias.resolve_alias(&room_alias).await {
 		| Ok((room_id, servers)) => {
 			debug!(
 				%room_id,
@@ -116,7 +116,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	.rooms
 	.state_cache
 	.room_members(&room_id)
-	.map(ToOwned::to_owned)
 	.ready_filter(|user| self.services.globals.user_is_local(user))
 	.boxed();
@@ -140,7 +139,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	.rooms
 	.alias
 	.local_aliases_for_room(&room_id)
-	.map(ToOwned::to_owned)
 	.for_each(|local_alias| async move {
 		self.services
 			.rooms
@@ -205,7 +203,7 @@ async fn ban_list_of_rooms(&self) -> Result {
 	},
 };
-room_ids.push(room_id.to_owned());
+room_ids.push(room_id.clone());
 }
 if room_alias_or_id.is_room_alias_id() {
@@ -215,7 +213,7 @@ async fn ban_list_of_rooms(&self) -> Result {
 	.services
 	.rooms
 	.alias
-	.resolve_local_alias(room_alias)
+	.resolve_local_alias(&room_alias)
 	.await
 {
 	| Ok(room_id) => room_id,
@@ -229,7 +227,7 @@ async fn ban_list_of_rooms(&self) -> Result {
 	.services
 	.rooms
 	.alias
-	.resolve_alias(room_alias)
+	.resolve_alias(&room_alias)
 	.await
 {
 	| Ok((room_id, servers)) => {
@@ -284,7 +282,6 @@ async fn ban_list_of_rooms(&self) -> Result {
 	.rooms
 	.state_cache
 	.room_members(&room_id)
-	.map(ToOwned::to_owned)
 	.ready_filter(|user| self.services.globals.user_is_local(user))
 	.boxed();
@@ -309,7 +306,6 @@ async fn ban_list_of_rooms(&self) -> Result {
 	.rooms
 	.alias
 	.local_aliases_for_room(&room_id)
-	.map(ToOwned::to_owned)
 	.for_each(|local_alias| async move {
 		self.services
 			.rooms
@@ -348,9 +344,9 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	};
 	debug!("Room specified is a room ID, unbanning room ID");
-	self.services.rooms.metadata.ban_room(room_id, false);
-	room_id.to_owned()
+	self.services.rooms.metadata.ban_room(&room_id, false);
+	room_id.clone()
 } else if room.is_room_alias_id() {
 	let room_alias = match RoomAliasId::parse(&room) {
 		| Ok(room_alias) => room_alias,
@@ -372,7 +368,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	.services
 	.rooms
 	.alias
-	.resolve_local_alias(room_alias)
+	.resolve_local_alias(&room_alias)
 	.await
 {
 	| Ok(room_id) => room_id,
@@ -382,7 +378,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 		room ID over federation"
 	);
-	match self.services.rooms.alias.resolve_alias(room_alias).await {
+	match self.services.rooms.alias.resolve_alias(&room_alias).await {
 		| Ok((room_id, servers)) => {
 			debug!(
 				%room_id,
@@ -453,6 +449,6 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result {
 	.collect::<Vec<_>>()
 	.join("\n");
-	self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
+	self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```"))
 		.await
 }
+2 -2
@@ -159,8 +159,8 @@ pub(super) async fn list_features(&self) -> Result {
 	let mut enabled_features = conduwuit::info::introspection::ENABLED_FEATURES
 		.lock()
 		.expect("locked")
-		.iter()
-		.flat_map(|(_, f)| f.iter())
+		.values()
+		.flat_map(|f| f.iter())
 		.collect::<Vec<_>>();
 	enabled_features.sort_unstable();
+54 -62
@@ -9,20 +9,18 @@
 };
 use conduwuit::{
 	Err, Result, debug_warn, error, info,
-	matrix::{Event, pdu::PduBuilder},
+	matrix::{Event, pdu::PartialPdu},
 	utils::{self, ReadyExt},
 	warn,
 };
 use futures::{FutureExt, StreamExt};
 use lettre::Address;
 use ruma::{
-	OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, UserId,
+	OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, ServerName,
+	UserId, assign,
 	events::{
-		RoomAccountDataEventType, StateEventType,
-		room::{
-			power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
-			redaction::RoomRedactionEventContent,
-		},
+		RoomAccountDataEventType,
+		room::{power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent},
 		tag::{TagEvent, TagEventContent, TagInfo},
 	},
 };
@@ -41,7 +39,7 @@ pub(super) async fn list_users(&self) -> Result {
 	.services
 	.users
 	.list_local_users()
-	.map(ToString::to_string)
+	.map(|id| id.as_str().to_owned())
 	.collect()
 	.await;
@@ -103,11 +101,12 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
 	ruma::events::GlobalAccountDataEventType::PushRules
 		.to_string()
 		.into(),
-	&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
-		content: ruma::events::push_rules::PushRulesEventContent {
-			global: ruma::push::Ruleset::server_default(&user_id),
-		},
-	})?,
+	&serde_json::to_value(ruma::events::push_rules::PushRulesEvent::new(
+		ruma::events::push_rules::PushRulesEventContent::new(
+			ruma::push::Ruleset::server_default(&user_id),
+		),
+	))
+	.unwrap(),
 )
 .await?;
@@ -292,7 +291,12 @@ pub(super) async fn reset_password(
 	self.services
 		.users
 		.all_device_ids(&user_id)
-		.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
+		.for_each(async |device_id| {
+			self.services
+				.users
+				.remove_device(&user_id, &device_id)
+				.await;
+		})
 		.await;
 	write!(self, "\nAll existing sessions have been logged out.").await?;
 }
@@ -437,7 +441,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
 	.rooms
 	.state_cache
 	.rooms_joined(&user_id)
-	.then(|room_id| get_room_info(self.services, room_id))
+	.then(async |room_id| get_room_info(self.services, &room_id).await)
 	.collect()
 	.await;
@@ -454,7 +458,7 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
 	.collect::<Vec<_>>()
 	.join("\n");
-	self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
+	self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len()))
 		.await
 }
@@ -506,7 +510,7 @@ pub(super) async fn force_join_list_of_local_users(
 	.rooms
 	.state_cache
 	.room_members(&room_id)
-	.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
+	.ready_any(|user_id| server_admins.contains(&user_id))
 	.await
 {
 	return Err!("There is not a single server admin in the room.",);
@@ -620,7 +624,7 @@ pub(super) async fn force_join_all_local_users(
 	.rooms
 	.state_cache
 	.room_members(&room_id)
-	.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
+	.ready_any(|user_id| server_admins.contains(&user_id))
 	.await
 {
 	return Err!("There is not a single server admin in the room.",);
@@ -633,7 +637,6 @@ pub(super) async fn force_join_all_local_users(
 	.services
 	.users
 	.list_local_users()
-	.map(UserId::to_owned)
 	.collect::<Vec<_>>()
 	.await
 {
@@ -684,7 +687,7 @@ pub(super) async fn force_join_room(
 	);
 	join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, &None).await?;
-	self.write_str(&format!("{user_id} has been joined to {room_id}.",))
+	self.write_str(&format!("{user_id} has been joined to {room_id}."))
 		.await
 }
@@ -716,7 +719,7 @@ pub(super) async fn force_leave_room(
 	.boxed()
 	.await?;
-	self.write_str(&format!("{user_id} has left {room_id}.",))
+	self.write_str(&format!("{user_id} has left {room_id}."))
 		.await
 }
@@ -730,42 +733,34 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
 	"Parsed user_id must be a local user"
 );
-let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
+let state_lock = self.services.rooms.state.mutex.lock(room_id.as_str()).await;
-let room_power_levels: Option<RoomPowerLevelsEventContent> = self
+let mut room_power_levels = self
 	.services
 	.rooms
 	.state_accessor
-	.room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
-	.await
-	.ok();
+	.get_room_power_levels(&room_id)
+	.await;
-let user_can_demote_self = room_power_levels
-	.as_ref()
-	.is_some_and(|power_levels_content| {
-		RoomPowerLevels::from(power_levels_content.clone())
-			.user_can_change_user_power_level(&user_id, &user_id)
-	}) || self
-	.services
-	.rooms
-	.state_accessor
-	.room_state_get(&room_id, &StateEventType::RoomCreate, "")
-	.await
-	.is_ok_and(|event| event.sender() == user_id);
+let user_can_demote_self =
+	room_power_levels.user_can_change_user_power_level(&user_id, &user_id);
 if !user_can_demote_self {
 	return Err!("User is not allowed to modify their own power levels in the room.",);
 }
-let mut power_levels_content = room_power_levels.unwrap_or_default();
-power_levels_content.users.remove(&user_id);
+room_power_levels.users.remove(&user_id);
 let event_id = self
 	.services
 	.rooms
 	.timeline
 	.build_and_append_pdu(
-		PduBuilder::state(String::new(), &power_levels_content),
+		PartialPdu::state(
+			String::new(),
+			&RoomPowerLevelsEventContent::try_from(room_power_levels)
+				.expect("PLs should be valid for room version"),
+		),
 		&user_id,
 		Some(&room_id),
 		&state_lock,
@@ -793,7 +788,7 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
 	.boxed()
 	.await?;
-	self.write_str(&format!("{user_id} has been granted admin privileges.",))
+	self.write_str(&format!("{user_id} has been granted admin privileges."))
 		.await
 }
@@ -811,9 +806,7 @@ pub(super) async fn put_room_tag(
 	.account_data
 	.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
 	.await
-	.unwrap_or(TagEvent {
-		content: TagEventContent { tags: BTreeMap::new() },
-	});
+	.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
 tags_event
 	.content
@@ -850,9 +843,7 @@ pub(super) async fn delete_room_tag(
 	.account_data
 	.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
 	.await
-	.unwrap_or(TagEvent {
-		content: TagEventContent { tags: BTreeMap::new() },
-	});
+	.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
 tags_event.content.tags.remove(&tag.clone().into());
@@ -882,9 +873,7 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId)
 	.account_data
 	.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
 	.await
-	.unwrap_or(TagEvent {
-		content: TagEventContent { tags: BTreeMap::new() },
-	});
+	.unwrap_or_else(|_| TagEvent::new(TagEventContent::new(BTreeMap::new())));
 self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
 	.await
@@ -921,19 +910,19 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
 	.rooms
 	.state
 	.mutex
-	.lock(&event.room_id_or_hash())
+	.lock(event.room_id_or_hash().as_str())
 	.await;
 self.services
 	.rooms
 	.timeline
 	.build_and_append_pdu(
-		PduBuilder {
+		PartialPdu {
 			redacts: Some(event.event_id().to_owned()),
-			..PduBuilder::timeline(&RoomRedactionEventContent {
+			..PartialPdu::timeline(&assign!(RoomRedactionEventContent::new_v1(), {
 				redacts: Some(event.event_id().to_owned()),
 				reason: Some(reason),
-			})
+			}))
 		},
 		event.sender(),
 		Some(&event.room_id_or_hash()),
@@ -963,7 +952,7 @@ pub(super) async fn force_leave_remote_room(
 	.resolve_with_servers(
 		&room_id,
 		if let Some(v) = via.clone() {
-			Some(vec![OwnedServerName::parse(v)?])
+			Some(vec![ServerName::parse(v)?])
 		} else {
 			None
 		},
@@ -976,7 +965,7 @@ pub(super) async fn force_leave_remote_room(
 	);
 	let mut vias: HashSet<OwnedServerName> = HashSet::new();
 	if let Some(via) = via {
-		vias.insert(OwnedServerName::parse(via)?);
+		vias.insert(ServerName::parse(via)?);
 	}
 	for server in vias_raw {
 		vias.insert(server);
@@ -1051,7 +1040,12 @@ pub(super) async fn logout(&self, user_id: String) -> Result {
 	self.services
 		.users
 		.all_device_ids(&user_id)
-		.for_each(|device_id| self.services.users.remove_device(&user_id, device_id))
+		.for_each(async |device_id| {
+			self.services
+				.users
+				.remove_device(&user_id, &device_id)
+				.await;
+		})
 		.await;
 	self.write_str(&format!("User {user_id} has been logged out from all devices."))
 		.await
@@ -1129,10 +1123,8 @@ pub(super) async fn get_user_by_email(&self, email: String) -> Result {
 	match self.services.threepid.get_localpart_for_email(&email).await {
 		| Some(localpart) => {
-			let user_id = OwnedUserId::parse(format!(
-				"@{localpart}:{}",
-				self.services.globals.server_name()
-			))
+			let user_id =
+				UserId::parse(format!("@{localpart}:{}", self.services.globals.server_name()))
 			.unwrap();
 			self.write_str(&format!("{email} belongs to {user_id}."))
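Several hunks above replace `.for_each(|x| future_fn(x))` with `.for_each(async |x| ... .await)` because the streams now yield owned values while the callees take references. A runnable sketch of the async-closure form (async closures are stable since roughly Rust 1.85; the names below are illustrative, not the project's API):

```rust
use futures::{StreamExt, stream};

// Stand-in for a service call that borrows its arguments.
async fn remove_device(user_id: &str, device_id: &str) {
    println!("removing {device_id} for {user_id}");
}

#[tokio::main]
async fn main() {
    let user_id = "@alice:example.org";
    stream::iter(vec!["DEV1".to_owned(), "DEV2".to_owned()])
        // The async closure owns each `device_id` and lends it out by reference.
        .for_each(async |device_id| remove_device(user_id, &device_id).await)
        .await;
}
```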
+2 -4
@@ -29,10 +29,6 @@ gzip_compression = [
 	"conduwuit-service/gzip_compression",
 	"reqwest/gzip",
 ]
-http3 = [
-	"conduwuit-core/http3",
-	"conduwuit-service/http3",
-]
 io_uring = [
 	"conduwuit-service/io_uring",
 ]
@@ -92,7 +88,9 @@ lettre.workspace = true
 log.workspace = true
 rand.workspace = true
 reqwest.workspace = true
+assign.workspace = true
 ruma.workspace = true
+ruminuwuity.workspace = true
 serde_html_form.workspace = true
 serde_json.workspace = true
 serde.workspace = true
+4 -7
@@ -1,10 +1,8 @@
 use axum::extract::State;
 use conduwuit::{Err, Result, info, utils::ReadyExt, warn};
 use futures::{FutureExt, StreamExt};
-use ruma::{
-	OwnedRoomAliasId, continuwuity_admin_api::rooms,
-	events::room::message::RoomMessageEventContent,
-};
+use ruma::{OwnedRoomAliasId, events::room::message::RoomMessageEventContent};
+use ruminuwuity::admin::continuwuity::rooms;
 use crate::{Ruma, client::leave_room};
@@ -36,7 +34,6 @@ pub(crate) async fn ban_room(
 	.rooms
 	.state_cache
 	.room_members(&body.room_id)
-	.map(ToOwned::to_owned)
 	.ready_filter(|user| services.globals.user_is_local(user))
 	.boxed();
 let mut evicted = Vec::new();
@@ -63,9 +60,9 @@ pub(crate) async fn ban_room(
 	.rooms
 	.alias
 	.local_aliases_for_room(&body.room_id)
-	.map(ToOwned::to_owned)
-	.collect::<Vec<_>>()
+	.collect()
 	.await;
 for alias in &aliases {
 	info!("Removing alias {} for banned room {}", alias, body.room_id);
 	services
+4 -3
@@ -1,7 +1,8 @@
 use axum::extract::State;
 use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{OwnedRoomId, continuwuity_admin_api::rooms};
+use ruma::OwnedRoomId;
+use ruminuwuity::admin::continuwuity::rooms;
 use crate::Ruma;
@@ -22,8 +23,8 @@ pub(crate) async fn list_rooms(
 	.metadata
 	.iter_ids()
 	.filter_map(|room_id| async move {
-		if !services.rooms.metadata.is_banned(room_id).await {
-			Some(room_id.to_owned())
+		if !services.rooms.metadata.is_banned(&room_id).await {
+			Some(room_id.clone())
 		} else {
 			None
 		}
+45 -69
@@ -1,15 +1,15 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
-	Err, Event, Result, err, info,
-	pdu::PduBuilder,
+	Err, Result, err, info,
+	pdu::PartialPdu,
 	utils::{ReadyExt, stream::BroadbandExt},
 };
 use conduwuit_service::Services;
 use futures::{FutureExt, StreamExt};
 use lettre::{Address, message::Mailbox};
 use ruma::{
-	OwnedRoomId, OwnedUserId, UserId,
+	OwnedRoomId, UserId,
 	api::client::{
 		account::{
 			ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity,
@@ -18,12 +18,10 @@
 		},
 		uiaa::{AuthFlow, AuthType},
 	},
-	events::{
-		StateEventType,
-		room::{
+	assign,
+	events::room::{
 		member::{MembershipState, RoomMemberEventContent},
-		power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
-		},
+		power_levels::RoomPowerLevelsEventContent,
 	},
 };
 use service::{mailer::messages, uiaa::Identity};
@@ -48,7 +46,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "register_available", level = "info")]
 pub(crate) async fn get_register_available_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_username_availability::v3::Request>,
 ) -> Result<get_username_availability::v3::Response> {
 	// Validate user id
@@ -87,7 +85,7 @@ pub(crate) async fn get_register_available_route(
 	return Err!(Request(Exclusive("Username is reserved by an appservice.")));
 }
-Ok(get_username_availability::v3::Response { available: true })
+Ok(get_username_availability::v3::Response::new(true))
 }
 /// # `POST /_matrix/client/r0/account/password`
@@ -110,7 +108,7 @@ pub(crate) async fn get_register_available_route(
 #[tracing::instrument(skip_all, fields(%client), name = "change_password", level = "info")]
 pub(crate) async fn change_password_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<change_password::v3::Request>,
 ) -> Result<change_password::v3::Response> {
 	let identity = if let Some(ref user_id) = body.sender_user {
@@ -143,7 +141,7 @@ pub(crate) async fn change_password_route(
 	.await?
 };
-let sender_user = OwnedUserId::parse(format!(
+let sender_user = UserId::parse(format!(
 	"@{}:{}",
 	identity.localpart.expect("localpart should be known"),
 	services.globals.server_name()
@@ -161,7 +159,7 @@ pub(crate) async fn change_password_route(
 	.users
 	.all_device_ids(&sender_user)
 	.ready_filter(|id| *id != body.sender_device())
-	.for_each(|id| services.users.remove_device(&sender_user, id))
+	.for_each(async |id| services.users.remove_device(&sender_user, &id).await)
 	.await;
 // Remove all pushers except the ones associated with this session
@@ -175,8 +173,8 @@ pub(crate) async fn change_password_route(
 	.get_pusher_device(&pushkey)
 	.await
 	.ok()
-	.filter(|pusher_device| pusher_device != body.sender_device())
-	.is_some()
+	.as_ref()
+	.is_some_and(|pusher_device| pusher_device != body.sender_device())
 	.then_some(pushkey)
 })
 .for_each(async |pushkey| {
@@ -194,7 +192,7 @@ pub(crate) async fn change_password_route(
 	.await;
 }
-Ok(change_password::v3::Response {})
+Ok(change_password::v3::Response::new())
 }
 /// # `POST /_matrix/client/v3/account/password/email/requestToken`
@@ -215,7 +213,7 @@ pub(crate) async fn request_password_change_token_via_email_route(
 };
 let user_id =
-	OwnedUserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
+	UserId::parse(format!("@{localpart}:{}", services.globals.server_name())).unwrap();
 let display_name = services.users.displayname(&user_id).await.ok();
 let session = services
@@ -251,11 +249,10 @@ pub(crate) async fn whoami_route(
 	.map_err(|_| {
 		err!(Request(Forbidden("Application service has not registered this user.")))
 	})? && body.appservice_info.is_none();
-Ok(whoami::v3::Response {
-	user_id: body.sender_user().to_owned(),
+Ok(assign!(whoami::v3::Response::new(body.sender_user().to_owned(), is_guest), {
 	device_id: body.sender_device.clone(),
-	is_guest,
-})
+}))
 }
 /// # `POST /_matrix/client/r0/account/deactivate`
@@ -272,7 +269,7 @@ pub(crate) async fn whoami_route(
 #[tracing::instrument(skip_all, fields(%client), name = "deactivate", level = "info")]
 pub(crate) async fn deactivate_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<deactivate::v3::Request>,
 ) -> Result<deactivate::v3::Response> {
 	// Authentication for this endpoint is technically optional,
@@ -310,9 +307,7 @@ pub(crate) async fn deactivate_route(
 	.await;
 }
-Ok(deactivate::v3::Response {
-	id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
-})
+Ok(deactivate::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
 }
 /// # `GET /_matrix/client/v1/register/m.login.registration_token/validity`
@@ -330,7 +325,7 @@ pub(crate) async fn check_registration_token_validity(
 	.await
 	.is_some();
-Ok(check_registration_token_validity::v1::Response { valid })
+Ok(check_registration_token_validity::v1::Response::new(valid))
 }
 /// Runs through all the deactivation steps:
@@ -354,13 +349,7 @@ pub async fn full_user_deactivate(
 	.await;
 }
-services
-	.users
-	.all_profile_keys(user_id)
-	.ready_for_each(|(profile_key, _)| {
-		services.users.set_profile_key(user_id, &profile_key, None);
-	})
-	.await;
+services.users.clear_profile(user_id).await;
 services
 	.pusher
@@ -372,62 +361,49 @@ pub async fn full_user_deactivate(
 // TODO: Rescind all user invites
-let mut pdu_queue: Vec<(PduBuilder, &OwnedRoomId)> = Vec::new();
+let mut pdu_queue: Vec<(PartialPdu, &OwnedRoomId)> = Vec::new();
 for room_id in all_joined_rooms {
 	let room_power_levels = services
 		.rooms
 		.state_accessor
-		.room_state_get_content::<RoomPowerLevelsEventContent>(
-			room_id,
-			&StateEventType::RoomPowerLevels,
-			"",
-		)
-		.await
-		.ok();
+		.get_room_power_levels(room_id)
+		.await;
-	let user_can_demote_self =
-		room_power_levels
-			.as_ref()
-			.is_some_and(|power_levels_content| {
-				RoomPowerLevels::from(power_levels_content.clone())
-					.user_can_change_user_power_level(user_id, user_id)
-			}) || services
-			.rooms
-			.state_accessor
-			.room_state_get(room_id, &StateEventType::RoomCreate, "")
-			.await
-			.is_ok_and(|event| event.sender() == user_id);
+	let user_can_demote_self =
+		room_power_levels.user_can_change_user_power_level(user_id, user_id);
-	if user_can_demote_self {
-		let mut power_levels_content = room_power_levels.unwrap_or_default();
+	if user_can_demote_self
+		&& let Ok(mut power_levels_content) =
+			RoomPowerLevelsEventContent::try_from(room_power_levels)
+	{
 		power_levels_content.users.remove(user_id);
-		let pl_evt = PduBuilder::state(String::new(), &power_levels_content);
+		let pl_evt = PartialPdu::state(String::new(), &power_levels_content);
 		pdu_queue.push((pl_evt, room_id));
 	}
 	// Leave the room
 	pdu_queue.push((
-		PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
-			avatar_url: None,
-			blurhash: None,
-			membership: MembershipState::Leave,
-			displayname: None,
-			join_authorized_via_users_server: None,
-			reason: None,
-			is_direct: None,
-			third_party_invite: None,
-			redact_events: None,
-		}),
+		PartialPdu::state(
+			user_id.to_string(),
+			&RoomMemberEventContent::new(MembershipState::Leave),
+		),
 		room_id,
 	));
 	// TODO: Redact all messages sent by the user in the room
 }
-super::update_all_rooms(services, pdu_queue, user_id)
-	.boxed()
-	.await;
+for (pdu, room_id) in pdu_queue {
+	let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
+	let _ = services
+		.rooms
+		.timeline
+		.build_and_append_pdu(pdu, user_id, Some(room_id.as_ref()), &state_lock)
+		.await;
+}
 for room_id in all_joined_rooms {
 	services.rooms.state_cache.forget(room_id, user_id);
 }
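The deactivation rewrite queues one leave-membership state event per joined room via `RoomMemberEventContent::new(MembershipState::Leave)` instead of a struct literal spelling out every optional field. A sketch of the minimal event content that amounts to (spec shape; the builder below is hypothetical):

```rust
use serde_json::{Value, json};

/// Hypothetical builder for the per-room leave event queued on deactivation.
fn leave_member_event(user_id: &str) -> Value {
    json!({
        "type": "m.room.member",
        "state_key": user_id,
        "content": { "membership": "leave" }
    })
}

fn main() {
    println!("{}", leave_member_event("@alice:example.org"));
}
```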
+17 -20
@@ -1,7 +1,7 @@
 use std::{collections::HashMap, fmt::Write};
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
 	Err, Result, debug_info, error, info,
 	utils::{self},
@@ -20,7 +20,11 @@
 	},
 	uiaa::{AuthFlow, AuthType},
 },
-events::{GlobalAccountDataEventType, room::message::RoomMessageEventContent},
+assign,
+events::{
+	GlobalAccountDataEventType, push_rules::PushRulesEvent,
+	room::message::RoomMessageEventContent,
+},
 push,
 };
 use serde_json::value::RawValue;
@@ -52,7 +56,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "register", level = "info")]
 pub(crate) async fn register_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<register::v3::Request>,
 ) -> Result<register::v3::Response> {
 	let is_guest = body.kind == RegistrationKind::Guest;
@@ -209,23 +213,15 @@ pub(crate) async fn register_route(
 	None,
 	&user_id,
 	GlobalAccountDataEventType::PushRules.to_string().into(),
-	&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
-		content: ruma::events::push_rules::PushRulesEventContent {
-			global: push::Ruleset::server_default(&user_id),
-		},
-	})?,
+	&serde_json::to_value(PushRulesEvent::new(
+		push::Ruleset::server_default(&user_id).into(),
+	))
+	.expect("should be able to serialize push rules"),
 )
 .await?;
 // Generate new device id if the user didn't specify one
-let no_device = body.inhibit_login
-	|| body
-		.appservice_info
-		.as_ref()
-		.is_some_and(|aps| aps.registration.device_management);
-let (token, device) = if !no_device {
-	// Don't create a device for inhibited logins
+let (token, device) = if !body.inhibit_login {
 	let device_id = if is_guest { None } else { body.device_id.clone() }
 		.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
@@ -243,12 +239,14 @@ pub(crate) async fn register_route(
 		Some(client.to_string()),
 	)
 	.await?;
-	debug_info!(%user_id, %device_id, "User account was created");
 	(Some(new_token), Some(device_id))
 } else {
+	// Don't create a device for inhibited logins
 	(None, None)
 };
+debug_info!(%user_id, ?device, "User account was created");
 // If the user registered with an email, associate it with their account.
 if let Some(identity) = identity
 	&& let Some(email) = identity.email
@@ -393,13 +391,12 @@ pub(crate) async fn register_route(
 	}
 }
-Ok(register::v3::Response {
+Ok(assign!(register::v3::Response::new(user_id), {
 	access_token: token,
-	user_id,
 	device_id: device,
 	refresh_token: None,
 	expires_in: None,
-})
+}))
 }
 /// Determine which flows and parameters should be presented when
+2 -6
@@ -141,9 +141,7 @@ pub(crate) async fn delete_3pid_route(
 	let sender_user = body.sender_user();
 	if body.medium != Medium::Email {
-		return Ok(delete_3pid::v3::Response {
-			id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
-		});
+		return Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::NoSupport));
 	}
 	if !services.threepid.email_requirement().may_remove() {
@@ -159,7 +157,5 @@ pub(crate) async fn delete_3pid_route(
 		return Err!(Request(ThreepidNotFound("Your account has no associated email.")));
 	}
-	Ok(delete_3pid::v3::Response {
-		id_server_unbind_result: ThirdPartyIdRemovalStatus::Success,
-	})
+	Ok(delete_3pid::v3::Response::new(ThirdPartyIdRemovalStatus::Success))
 }
+6 -9
@@ -7,10 +7,7 @@
 	get_global_account_data, get_room_account_data, set_global_account_data,
 	set_room_account_data,
 },
-events::{
-	AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
-	RoomAccountDataEventType,
-},
+events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
 serde::Raw,
 };
 use serde::Deserialize;
@@ -40,7 +37,7 @@ pub(crate) async fn set_global_account_data_route(
 	)
 	.await?;
-	Ok(set_global_account_data::v3::Response {})
+	Ok(set_global_account_data::v3::Response::new())
 }
 /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
@@ -65,7 +62,7 @@ pub(crate) async fn set_room_account_data_route(
 	)
 	.await?;
-	Ok(set_room_account_data::v3::Response {})
+	Ok(set_room_account_data::v3::Response::new())
 }
 /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
@@ -87,7 +84,7 @@ pub(crate) async fn get_global_account_data_route(
 	.await
 	.map_err(|_| err!(Request(NotFound("Data not found."))))?;
-	Ok(get_global_account_data::v3::Response { account_data: account_data.content })
+	Ok(get_global_account_data::v3::Response::new(account_data.content))
 }
 /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
@@ -109,7 +106,7 @@ pub(crate) async fn get_room_account_data_route(
 	.await
 	.map_err(|_| err!(Request(NotFound("Data not found."))))?;
-	Ok(get_room_account_data::v3::Response { account_data: account_data.content })
+	Ok(get_room_account_data::v3::Response::new(account_data.content))
 }
 async fn set_account_data(
@@ -119,7 +116,7 @@ async fn set_account_data(
 	event_type_s: &str,
 	data: &RawJsonValue,
 ) -> Result {
-	if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() {
+	if event_type_s == "m.fully_read" {
 		return Err!(Request(BadJson(
 			"This endpoint cannot be used for marking a room as fully read (setting \
			m.fully_read)"
+1 -1
@@ -1,7 +1,7 @@
 use axum::extract::State;
 use conduwuit::{Err, Result};
 use futures::future::{join, join3};
-use ruma::api::client::admin::{get_suspended, set_suspended};
+use ruminuwuity::admin::{get_suspended, set_suspended};
 use crate::Ruma;
+7 -4
@@ -1,6 +1,9 @@
 use axum::extract::State;
 use conduwuit::{Err, Result, err};
-use ruma::api::{appservice::ping, client::appservice::request_ping};
+use ruma::{
+	api::{appservice::ping, client::appservice::request_ping},
+	assign,
+};
 use crate::Ruma;
@@ -40,12 +43,12 @@ pub(crate) async fn appservice_ping(
 	.sending
 	.send_appservice_request(
 		appservice_info.registration.clone(),
-		ping::send_ping::v1::Request {
+		assign!(ping::send_ping::v1::Request::new(), {
 			transaction_id: body.transaction_id.clone(),
-		},
+		}),
 	)
 	.await?
 	.expect("We already validated if an appservice URL exists above");
-	Ok(request_ping::v1::Response { duration: timer.elapsed() })
+	Ok(request_ping::v1::Response::new(timer.elapsed()))
 }
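`assign!` here comes from the `assign` crate: it takes a base value (usually from a `new` constructor) and overwrites the listed fields, which keeps constructor-based request types nearly as terse as the old struct literals. A self-contained sketch with a stand-in type:

```rust
use assign::assign;

#[derive(Debug, Default)]
struct PingRequest {
    transaction_id: Option<String>,
}

impl PingRequest {
    fn new() -> Self {
        Self::default()
    }
}

fn main() {
    // Start from the constructor, then assign only the fields that differ.
    let req = assign!(PingRequest::new(), {
        transaction_id: Some("txn-1".to_owned()),
    });
    println!("{req:?}");
}
```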
+29 -33
View File
@@ -3,7 +3,6 @@
 use axum::extract::State;
 use conduwuit::{Err, Result, err};
 use conduwuit_service::Services;
-use futures::{FutureExt, future::try_join};
 use ruma::{
 	UInt, UserId,
 	api::client::backup::{
@@ -28,7 +27,7 @@ pub(crate) async fn create_backup_version_route(
 		.key_backups
 		.create_backup(body.sender_user(), &body.algorithm)?;

-	Ok(create_backup_version::v3::Response { version })
+	Ok(create_backup_version::v3::Response::new(version))
 }

 /// # `PUT /_matrix/client/r0/room_keys/version/{version}`
@@ -44,7 +43,7 @@ pub(crate) async fn update_backup_version_route(
 		.update_backup(body.sender_user(), &body.version, &body.algorithm)
 		.await?;

-	Ok(update_backup_version::v3::Response {})
+	Ok(update_backup_version::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/room_keys/version`
@@ -60,9 +59,9 @@ pub(crate) async fn get_latest_backup_info_route(
 		.await
 		.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await;

-	Ok(get_latest_backup_info::v3::Response { algorithm, count, etag, version })
+	Ok(get_latest_backup_info::v3::Response::new(algorithm, count, etag, version))
 }

 /// # `GET /_matrix/client/v3/room_keys/version/{version}`
@@ -80,14 +79,9 @@ pub(crate) async fn get_backup_info_route(
 			err!(Request(NotFound("Key backup does not exist at version {:?}", body.version)))
 		})?;

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(get_backup_info::v3::Response {
-		algorithm,
-		count,
-		etag,
-		version: body.version.clone(),
-	})
+	Ok(get_backup_info::v3::Response::new(algorithm, count, etag, body.version.clone()))
 }

 /// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
@@ -105,7 +99,7 @@ pub(crate) async fn delete_backup_version_route(
 		.delete_backup(body.sender_user(), &body.version)
 		.await;

-	Ok(delete_backup_version::v3::Response {})
+	Ok(delete_backup_version::v3::Response::new())
 }

 /// # `PUT /_matrix/client/r0/room_keys/keys`
@@ -140,9 +134,9 @@ pub(crate) async fn add_backup_keys_route(
 		}
 	}

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(add_backup_keys::v3::Response { count, etag })
+	Ok(add_backup_keys::v3::Response::new(etag, count))
 }

 /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -175,9 +169,9 @@ pub(crate) async fn add_backup_keys_for_room_route(
 			.await?;
 	}

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(add_backup_keys_for_room::v3::Response { count, etag })
+	Ok(add_backup_keys_for_room::v3::Response::new(etag, count))
 }

 /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -275,9 +269,9 @@ pub(crate) async fn add_backup_keys_for_session_route(
 			.await?;
 	}

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(add_backup_keys_for_session::v3::Response { count, etag })
+	Ok(add_backup_keys_for_session::v3::Response::new(etag, count))
 }

 /// # `GET /_matrix/client/r0/room_keys/keys`
@@ -292,7 +286,7 @@ pub(crate) async fn get_backup_keys_route(
 		.get_all(body.sender_user(), &body.version)
 		.await;

-	Ok(get_backup_keys::v3::Response { rooms })
+	Ok(get_backup_keys::v3::Response::new(rooms))
 }

 /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -307,7 +301,7 @@ pub(crate) async fn get_backup_keys_for_room_route(
 		.get_room(body.sender_user(), &body.version, &body.room_id)
 		.await;

-	Ok(get_backup_keys_for_room::v3::Response { sessions })
+	Ok(get_backup_keys_for_room::v3::Response::new(sessions))
 }

 /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -325,7 +319,7 @@ pub(crate) async fn get_backup_keys_for_session_route(
 			err!(Request(NotFound(debug_error!("Backup key not found for this user's session."))))
 		})?;

-	Ok(get_backup_keys_for_session::v3::Response { key_data })
+	Ok(get_backup_keys_for_session::v3::Response::new(key_data))
 }

 /// # `DELETE /_matrix/client/r0/room_keys/keys`
@@ -340,9 +334,9 @@ pub(crate) async fn delete_backup_keys_route(
 		.delete_all_keys(body.sender_user(), &body.version)
 		.await;

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(delete_backup_keys::v3::Response { count, etag })
+	Ok(delete_backup_keys::v3::Response::new(etag, count))
 }

 /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
@@ -357,9 +351,9 @@ pub(crate) async fn delete_backup_keys_for_room_route(
 		.delete_room_keys(body.sender_user(), &body.version, &body.room_id)
 		.await;

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(delete_backup_keys_for_room::v3::Response { count, etag })
+	Ok(delete_backup_keys_for_room::v3::Response::new(etag, count))
 }

 /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
@@ -374,22 +368,24 @@ pub(crate) async fn delete_backup_keys_for_session_route(
 		.delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id)
 		.await;

-	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
+	let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await;

-	Ok(delete_backup_keys_for_session::v3::Response { count, etag })
+	Ok(delete_backup_keys_for_session::v3::Response::new(etag, count))
 }

 async fn get_count_etag(
 	services: &Services,
 	sender_user: &UserId,
 	version: &str,
-) -> Result<(UInt, String)> {
-	let count = services
+) -> (UInt, String) {
+	let count: UInt = services
 		.key_backups
 		.count_keys(sender_user, version)
-		.map(TryInto::try_into);
+		.await
+		.try_into()
+		.expect("number of keys should fit into a UInt");

-	let etag = services.key_backups.get_etag(sender_user, version).map(Ok);
+	let etag = services.key_backups.get_etag(sender_user, version).await;

-	Ok(try_join(count, etag).await?)
+	(count, etag)
 }
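With the storage calls now infallible, get_count_etag returns the tuple directly; the only conversion left is narrowing the key count into ruma's UInt. A small sketch of that narrowing in isolation (assuming ruma's js_int-backed UInt, which holds at most 2^53 - 1):

	use ruma::UInt;

	fn count_to_uint(count: usize) -> UInt {
		// try_into only fails past 2^53 - 1 keys, which a backup cannot
		// realistically reach, hence the expect mirroring the diff above.
		count
			.try_into()
			.expect("number of keys should fit into a UInt")
	}

	fn main() {
		assert_eq!(count_to_uint(42), UInt::from(42u32));
	}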
+13 -12
@@ -5,8 +5,11 @@
 use ruma::{
 	RoomVersionId,
 	api::client::discovery::get_capabilities::{
-		self, Capabilities, GetLoginTokenCapability, RoomVersionStability,
-		RoomVersionsCapability, ThirdPartyIdChangesCapability,
+		self,
+		v3::{
+			Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability,
+			ThirdPartyIdChangesCapability,
+		},
 	},
 };
 use serde_json::json;
@@ -25,19 +28,17 @@ pub(crate) async fn get_capabilities_route(
 		Server::available_room_versions().collect();

 	let mut capabilities = Capabilities::default();
-	capabilities.room_versions = RoomVersionsCapability {
-		available,
-		default: services.server.config.default_room_version.clone(),
-	};
+	capabilities.room_versions = RoomVersionsCapability::new(
+		services.server.config.default_room_version.clone(),
+		available,
+	);

 	// Only allow 3pid changes if SMTP is configured
-	capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability {
-		enabled: services.threepid.email_requirement().may_change(),
-	};
+	capabilities.thirdparty_id_changes =
+		ThirdPartyIdChangesCapability::new(services.threepid.email_requirement().may_change());

-	capabilities.get_login_token = GetLoginTokenCapability {
-		enabled: services.server.config.login_via_existing_session,
-	};
+	capabilities.get_login_token =
+		GetLoginTokenCapability::new(services.server.config.login_via_existing_session);

 	// MSC4133 capability
 	capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?;
@@ -56,5 +57,5 @@ pub(crate) async fn get_capabilities_route(
 		capabilities.set("uk.timedout.msc4323", json!({"suspend": true, "lock": false}))?;
 	}

-	Ok(get_capabilities::v3::Response { capabilities })
+	Ok(get_capabilities::v3::Response::new(capabilities))
 }
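Beyond the typed capabilities, unstable ones are advertised through Capabilities::set, which stores an arbitrary JSON value under the given name, as the MSC4133 and MSC4323 lines above do. A minimal sketch (assuming the ruma version used in this tree, where Capabilities lives under get_capabilities::v3):

	use ruma::api::client::discovery::get_capabilities::v3::Capabilities;
	use serde_json::json;

	fn custom_capabilities() -> serde_json::Result<Capabilities> {
		let mut capabilities = Capabilities::default();
		// Unknown capability names are carried as raw JSON and round-trip
		// into the /capabilities response unchanged.
		capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?;
		Ok(capabilities)
	}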
+5 -3
@@ -12,7 +12,9 @@
 	FutureExt, StreamExt, TryFutureExt, TryStreamExt,
 	future::{OptionFuture, join, join3, try_join3},
 };
-use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType};
+use ruma::{
+	OwnedEventId, UserId, api::client::context::get_context, assign, events::StateEventType,
+};

 use crate::{
 	Ruma,
@@ -213,7 +215,7 @@ pub(crate) async fn get_context_route(
 		.collect()
 		.await;

-	Ok(get_context::v3::Response {
+	Ok(assign!(get_context::v3::Response::new(), {
 		event: base_event.map(at!(1)).map(Event::into_format),
 		start: events_before
@@ -243,5 +245,5 @@ pub(crate) async fn get_context_route(
 			.collect(),
 		state,
-	})
+	}))
 }
+16 -16
@@ -1,11 +1,15 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{Err, Result, at};
 use futures::StreamExt;
-use ruma::api::client::dehydrated_device::{
-	delete_dehydrated_device::unstable as delete_dehydrated_device,
-	get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
-	put_dehydrated_device::unstable as put_dehydrated_device,
+use ruma::{
+	api::client::dehydrated_device::{
+		delete_dehydrated_device::unstable as delete_dehydrated_device,
+		get_dehydrated_device::unstable as get_dehydrated_device,
+		get_events::unstable as get_events,
+		put_dehydrated_device::unstable as put_dehydrated_device,
+	},
+	assign,
 };

 use crate::Ruma;
@@ -18,7 +22,7 @@
 #[tracing::instrument(skip_all, fields(%client))]
 pub(crate) async fn put_dehydrated_device_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<put_dehydrated_device::Request>,
 ) -> Result<put_dehydrated_device::Response> {
 	let sender_user = body
@@ -33,7 +37,7 @@ pub(crate) async fn put_dehydrated_device_route(
 		.set_dehydrated_device(sender_user, body.body)
 		.await?;

-	Ok(put_dehydrated_device::Response { device_id })
+	Ok(put_dehydrated_device::Response::new(device_id))
 }

 /// # `DELETE /_matrix/client/../dehydrated_device`
@@ -42,7 +46,7 @@ pub(crate) async fn put_dehydrated_device_route(
 #[tracing::instrument(skip_all, fields(%client))]
 pub(crate) async fn delete_dehydrated_device_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<delete_dehydrated_device::Request>,
 ) -> Result<delete_dehydrated_device::Response> {
 	let sender_user = body.sender_user();
@@ -51,7 +55,7 @@ pub(crate) async fn delete_dehydrated_device_route(
 	services.users.remove_device(sender_user, &device_id).await;

-	Ok(delete_dehydrated_device::Response { device_id })
+	Ok(delete_dehydrated_device::Response::new(device_id))
 }

 /// # `GET /_matrix/client/../dehydrated_device`
@@ -60,17 +64,14 @@ pub(crate) async fn delete_dehydrated_device_route(
 #[tracing::instrument(skip_all, fields(%client))]
 pub(crate) async fn get_dehydrated_device_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_dehydrated_device::Request>,
 ) -> Result<get_dehydrated_device::Response> {
 	let sender_user = body.sender_user();

 	let device = services.users.get_dehydrated_device(sender_user).await?;

-	Ok(get_dehydrated_device::Response {
-		device_id: device.device_id,
-		device_data: device.device_data,
-	})
+	Ok(get_dehydrated_device::Response::new(device.device_id, device.device_data))
 }

 /// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
@@ -79,7 +80,7 @@ pub(crate) async fn get_dehydrated_device_route(
 #[tracing::instrument(skip_all, fields(%client))]
 pub(crate) async fn get_dehydrated_events_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_events::Request>,
 ) -> Result<get_events::Response> {
 	let sender_user = body.sender_user();
@@ -114,8 +115,7 @@ pub(crate) async fn get_dehydrated_events_route(
 		.collect()
 		.await;

-	Ok(get_events::Response {
-		events,
+	Ok(assign!(get_events::Response::new(events), {
 		next_batch: next_batch.as_ref().map(ToString::to_string),
-	})
+	}))
 }
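Every handler above also swaps InsecureClientIp for axum-client-ip 1.x's unified ClientIp extractor, which resolves the address from a ClientIpSource registered on the router. A minimal wiring sketch (the ConnectInfo source is an assumption chosen for illustration; a deployment behind a reverse proxy would pick a header-based source instead):

	use axum::{Router, routing::get};
	use axum_client_ip::{ClientIp, ClientIpSource};

	async fn whoami(ClientIp(ip): ClientIp) -> String {
		format!("client ip: {ip}")
	}

	fn app() -> Router {
		Router::new()
			.route("/ip", get(whoami))
			// The extractor reads its resolution strategy from this
			// extension layer.
			.layer(ClientIpSource::ConnectInfo.into_extension())
	}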
+17 -40
@@ -1,5 +1,5 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{Err, Result, debug, err, utils};
 use futures::StreamExt;
 use ruma::{
@@ -25,7 +25,7 @@ pub(crate) async fn get_devices_route(
 		.collect()
 		.await;

-	Ok(get_devices::v3::Response { devices })
+	Ok(get_devices::v3::Response::new(devices))
 }

 /// # `GET /_matrix/client/r0/devices/{deviceId}`
@@ -41,7 +41,7 @@ pub(crate) async fn get_device_route(
 		.await
 		.map_err(|_| err!(Request(NotFound("Device not found."))))?;

-	Ok(get_device::v3::Response { device })
+	Ok(get_device::v3::Response::new(device))
 }

 /// # `PUT /_matrix/client/r0/devices/{deviceId}`
@@ -50,7 +50,7 @@ pub(crate) async fn get_device_route(
 #[tracing::instrument(skip_all, fields(%client), name = "update_device", level = "debug")]
 pub(crate) async fn update_device_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<update_device::v3::Request>,
 ) -> Result<update_device::v3::Response> {
 	let sender_user = body.sender_user();
@@ -73,19 +73,16 @@ pub(crate) async fn update_device_route(
 				.update_device_metadata(sender_user, &body.device_id, &device)
 				.await?;

-			Ok(update_device::v3::Response {})
+			Ok(update_device::v3::Response::new())
 		},
 		| Err(_) => {
 			let Some(appservice) = appservice else {
 				return Err!(Request(NotFound("Device not found.")));
 			};
-			if !appservice.registration.device_management {
-				return Err!(Request(NotFound("Device not found.")));
-			}

 			debug!(
-				"Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \
-				 and device ID does not exist",
+				"Creating new device for {sender_user} from appservice {} as device ID does not \
+				 exist",
 				appservice.registration.id
 			);
@@ -102,7 +99,7 @@ pub(crate) async fn update_device_route(
 				)
 				.await?;

-			return Ok(update_device::v3::Response {});
+			return Ok(update_device::v3::Response::new());
 		},
 	}
 }
@@ -124,39 +121,28 @@ pub(crate) async fn delete_device_route(
 	let sender_user = body.sender_user();
 	let appservice = body.appservice_info.as_ref();

-	if appservice.is_some_and(|appservice| appservice.registration.device_management) {
-		debug!(
-			"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
-			 enabled"
-		);
-		services
-			.users
-			.remove_device(sender_user, &body.device_id)
-			.await;
-
-		return Ok(delete_device::v3::Response {});
-	}
-
-	// Prompt the user to confirm with their password using UIAA
-	let _ = services
-		.uiaa
-		.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
-		.await?;
+	// Appservices get to skip UIAA for this endpoint
+	if appservice.is_none() {
+		// Prompt the user to confirm with their password using UIAA
+		let _ = services
+			.uiaa
+			.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
+			.await?;
+	}

 	services
 		.users
 		.remove_device(sender_user, &body.device_id)
 		.await;

-	Ok(delete_device::v3::Response {})
+	Ok(delete_device::v3::Response::new())
 }

 /// # `POST /_matrix/client/v3/delete_devices`
 ///
 /// Deletes the given list of devices.
 ///
-/// - Requires UIAA to verify user password unless from an appservice with
-///   MSC4190 enabled.
+/// - Requires UIAA to verify user password.
 ///
 /// For each device:
 /// - Invalidates access token
@@ -171,27 +157,18 @@ pub(crate) async fn delete_devices_route(
 	let sender_user = body.sender_user();
 	let appservice = body.appservice_info.as_ref();

-	if appservice.is_some_and(|appservice| appservice.registration.device_management) {
-		debug!(
-			"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
-			 enabled"
-		);
-		for device_id in &body.devices {
-			services.users.remove_device(sender_user, device_id).await;
-		}
-
-		return Ok(delete_devices::v3::Response {});
-	}
-
-	// Prompt the user to confirm with their password using UIAA
-	let _ = services
-		.uiaa
-		.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
-		.await?;
+	// Appservices get to skip UIAA for this endpoint
+	if appservice.is_none() {
+		// Prompt the user to confirm with their password using UIAA
+		let _ = services
+			.uiaa
+			.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
+			.await?;
+	}

 	for device_id in &body.devices {
 		services.users.remove_device(sender_user, device_id).await;
 	}

-	Ok(delete_devices::v3::Response {})
+	Ok(delete_devices::v3::Response::new())
 }
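The MSC4190 device_management flag is gone from both delete handlers; any appservice now bypasses UIAA, while ordinary users still confirm with their password. A self-contained sketch of the gate (stand-in types, not the server's real ones):

	struct AppserviceInfo;

	fn authenticate_password(password: &str) -> Result<(), &'static str> {
		if password == "correct" { Ok(()) } else { Err("M_FORBIDDEN") }
	}

	fn delete_device(
		appservice: Option<&AppserviceInfo>,
		password: &str,
	) -> Result<(), &'static str> {
		// Appservices get to skip UIAA for this endpoint.
		if appservice.is_none() {
			authenticate_password(password)?;
		}
		// ... the device would be removed here ...
		Ok(())
	}

	fn main() {
		assert!(delete_device(Some(&AppserviceInfo), "anything").is_ok());
		assert!(delete_device(None, "wrong").is_err());
	}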
+50 -140
@@ -1,23 +1,16 @@
-use std::iter::once;
-
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
-	Err, Event, Result, RoomVersion, err, info,
+	Err, Result, err, info,
 	utils::{
-		TryFutureExtExt,
 		math::Expected,
-		result::FlatOk,
 		stream::{ReadyExt, WidebandExt},
 	},
 };
 use conduwuit_service::Services;
-use futures::{
-	FutureExt, StreamExt, TryFutureExt,
-	future::{join, join4, join5},
-};
+use futures::StreamExt;
 use ruma::{
-	OwnedRoomId, RoomId, ServerName, UInt, UserId,
+	RoomId, ServerName, UInt, UserId,
 	api::{
 		client::{
 			directory::{
@@ -28,15 +21,9 @@
 		},
 		federation,
 	},
-	directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
-	events::{
-		StateEventType,
-		room::{
-			create::RoomCreateEventContent,
-			join_rules::{JoinRule, RoomJoinRulesEventContent},
-			power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
-		},
-	},
+	assign,
+	directory::{Filter, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
+	events::StateEventType,
 	uint,
 };
 use tokio::join;
@@ -51,7 +38,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "publicrooms", level = "info")]
 pub(crate) async fn get_public_rooms_filtered_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_public_rooms_filtered::v3::Request>,
 ) -> Result<get_public_rooms_filtered::v3::Response> {
 	if let Some(server) = &body.server {
@@ -87,7 +74,7 @@ pub(crate) async fn get_public_rooms_filtered_route(
 #[tracing::instrument(skip_all, fields(%client), name = "publicrooms", level = "info")]
 pub(crate) async fn get_public_rooms_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_public_rooms::v3::Request>,
 ) -> Result<get_public_rooms::v3::Response> {
 	if let Some(server) = &body.server {
@@ -109,12 +96,11 @@ pub(crate) async fn get_public_rooms_route(
 			err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
 		})?;

-	Ok(get_public_rooms::v3::Response {
-		chunk: response.chunk,
+	Ok(assign!(get_public_rooms::v3::Response::new(response.chunk), {
 		prev_batch: response.prev_batch,
 		next_batch: response.next_batch,
 		total_room_count_estimate: response.total_room_count_estimate,
-	})
+	}))
 }

 /// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
@@ -123,7 +109,7 @@ pub(crate) async fn get_public_rooms_route(
 #[tracing::instrument(skip_all, fields(%client), name = "room_directory", level = "info")]
 pub(crate) async fn set_room_visibility_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<set_room_visibility::v3::Request>,
 ) -> Result<set_room_visibility::v3::Response> {
 	let sender_user = body.sender_user();
@@ -197,7 +183,7 @@ pub(crate) async fn set_room_visibility_route(
 		},
 	}

-	Ok(set_room_visibility::v3::Response {})
+	Ok(set_room_visibility::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
@@ -212,13 +198,13 @@ pub(crate) async fn get_room_visibility_route(
 		return Err!(Request(NotFound("Room not found")));
 	}

-	Ok(get_room_visibility::v3::Response {
-		visibility: if services.rooms.directory.is_public_room(&body.room_id).await {
-			room::Visibility::Public
-		} else {
-			room::Visibility::Private
-		},
-	})
+	let visibility = if services.rooms.directory.is_public_room(&body.room_id).await {
+		room::Visibility::Public
+	} else {
+		room::Visibility::Private
+	};
+
+	Ok(get_room_visibility::v3::Response::new(visibility))
 }

 pub(crate) async fn get_public_rooms_filtered_helper(
@@ -236,24 +222,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 			.sending
 			.send_federation_request(
 				other_server,
-				federation::directory::get_public_rooms_filtered::v1::Request {
+				assign!(federation::directory::get_public_rooms_filtered::v1::Request::new(), {
 					limit,
 					since: since.map(ToOwned::to_owned),
-					filter: Filter {
+					filter: assign!(Filter::new(), {
 						generic_search_term: filter.generic_search_term.clone(),
 						room_types: filter.room_types.clone(),
-					},
+					}),
 					room_network: RoomNetwork::Matrix,
-				},
+				}),
 			)
 			.await?;

-		return Ok(get_public_rooms_filtered::v3::Response {
+		return Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
 			chunk: response.chunk,
 			prev_batch: response.prev_batch,
 			next_batch: response.next_batch,
 			total_room_count_estimate: response.total_room_count_estimate,
-		});
+		}));
 	}

 	// Use limit or else 10, with maximum 100
@@ -284,16 +270,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 		.rooms
 		.directory
 		.public_rooms()
-		.map(ToOwned::to_owned)
-		.wide_then(|room_id| public_rooms_chunk(services, room_id))
-		.ready_filter_map(|chunk| {
+		.wide_then(async |room_id| {
+			let summary = services
+				.rooms
+				.summary
+				.build_local_room_summary(&room_id)
+				.await
+				.expect("room in public room directory should exist");
+
+			summary.into()
+		})
+		.ready_filter_map(|chunk: PublicRoomsChunk| {
 			if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) {
 				return None;
 			}

 			if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
 				if let Some(name) = &chunk.name {
-					if name.as_str().to_lowercase().contains(&query) {
+					if name.to_lowercase().contains(&query) {
 						return Some(chunk);
 					}
 				}
@@ -320,7 +314,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 		.collect()
 		.await;

-	all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
+	all_rooms.sort_by_key(|r| std::cmp::Reverse(r.num_joined_members));

 	let total_room_count_estimate = UInt::try_from(all_rooms.len())
 		.unwrap_or_else(|_| uint!(0))
@@ -335,12 +329,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 		.ge(&limit)
 		.then_some(format!("n{}", num_since.expected_add(limit)));

-	Ok(get_public_rooms_filtered::v3::Response {
+	Ok(assign!(get_public_rooms_filtered::v3::Response::new(), {
 		chunk,
 		prev_batch,
 		next_batch,
 		total_room_count_estimate,
-	})
+	}))
 }

 /// Checks whether the given user ID is allowed to publish the target room to
@@ -356,109 +350,25 @@ async fn user_can_publish_room(
 		// Server admins can always publish to their own room directory.
 		return Ok(true);
 	}

-	let (create_event, room_version, power_levels_content) = join!(
-		services
-			.rooms
-			.state_accessor
-			.room_state_get(room_id, &StateEventType::RoomCreate, ""),
+	let (room_version, room_creators, power_levels) = join!(
 		services.rooms.state.get_room_version(room_id),
-		services
-			.rooms
-			.state_accessor
-			.room_state_get_content::<RoomPowerLevelsEventContent>(
-				room_id,
-				&StateEventType::RoomPowerLevels,
-				""
-			)
+		services.rooms.state_accessor.get_room_creators(room_id),
+		services.rooms.state_accessor.get_room_power_levels(room_id),
 	);

 	let room_version = room_version
 		.as_ref()
 		.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
-	let create_event = create_event.map_err(|_| err!(Request(NotFound("Unknown room"))))?;
+	let room_version_rules = room_version.rules().unwrap();

-	if RoomVersion::new(room_version)
-		.expect("room version must be supported")
-		.authorization
+	if room_version_rules
+		.authorization
 		.explicitly_privilege_room_creators
+		&& room_creators.contains(user_id)
 	{
-		let create_content: RoomCreateEventContent =
-			serde_json::from_str(create_event.content().get())
-				.map_err(|_| err!(Database("Invalid event content for m.room.create")))?;
-		let is_creator = create_content
-			.additional_creators
-			.unwrap_or_default()
-			.into_iter()
-			.chain(once(create_event.sender().to_owned()))
-			.any(|sender| sender == user_id);
-		if is_creator {
-			return Ok(true);
-		}
-	}
+		return Ok(true);
+	}

-	match power_levels_content.map(RoomPowerLevels::from) {
-		| Ok(pl) => Ok(pl.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)),
-		| Err(e) =>
-			if e.is_not_found() {
-				Ok(create_event.sender() == user_id)
-			} else {
-				Err!(Database("Invalid event content for m.room.power_levels: {e}"))
-			},
-	}
-}
-
-async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
-	let name = services.rooms.state_accessor.get_name(&room_id).ok();
-	let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok();
-	let canonical_alias = services
-		.rooms
-		.state_accessor
-		.get_canonical_alias(&room_id)
-		.ok();
-	let avatar_url = services.rooms.state_accessor.get_avatar(&room_id);
-	let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok();
-	let world_readable = services.rooms.state_accessor.is_world_readable(&room_id);
-	let join_rule = services
-		.rooms
-		.state_accessor
-		.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")
-		.map_ok(|c: RoomJoinRulesEventContent| match c.join_rule {
-			| JoinRule::Public => PublicRoomJoinRule::Public,
-			| JoinRule::Knock => "knock".into(),
-			| JoinRule::KnockRestricted(_) => "knock_restricted".into(),
-			| _ => "invite".into(),
-		});
-	let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id);
-	let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id);
-
-	let (
-		(avatar_url, canonical_alias, guest_can_join, join_rule, name),
-		(num_joined_members, room_type, topic, world_readable),
-	) = join(
-		join5(avatar_url, canonical_alias, guest_can_join, join_rule, name),
-		join4(num_joined_members, room_type, topic, world_readable),
-	)
-	.boxed()
-	.await;
-
-	PublicRoomsChunk {
-		avatar_url: avatar_url.into_option().unwrap_or_default().url,
-		canonical_alias,
-		guest_can_join,
-		join_rule: join_rule.unwrap_or_default(),
-		name,
-		num_joined_members: num_joined_members
-			.map(TryInto::try_into)
-			.map(Result::ok)
-			.flat_ok()
-			.unwrap_or_else(|| uint!(0)),
-		room_id,
-		room_type,
-		topic,
-		world_readable,
-	}
+	Ok(power_levels.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias))
 }
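Two smaller changes ride along in the helper: per-room chunks now come from the summary service rather than the removed public_rooms_chunk, and the descending member-count ordering is expressed with sort_by_key plus std::cmp::Reverse instead of a hand-written comparator. The sort idiom in isolation:

	use std::cmp::Reverse;

	fn main() {
		let mut rooms = vec![("#a", 3u64), ("#b", 10), ("#c", 7)];
		// Highest joined-member count first, as in the public rooms list.
		rooms.sort_by_key(|&(_, members)| Reverse(members));
		assert_eq!(rooms.first().map(|r| r.0), Some("#b"));
	}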
+69 -109
@@ -5,25 +5,23 @@
 use axum::extract::State;
 use conduwuit::{
-	Err, Error, Result, debug, debug_warn, err,
-	result::NotFound,
-	utils::{IterStream, stream::WidebandExt},
+	Err, Result, debug, debug_warn, err,
+	result::FlatOk,
+	utils::{IterStream, TryFutureExtExt, stream::WidebandExt},
 };
 use conduwuit_service::{Services, users::parse_master_key};
 use futures::{StreamExt, stream::FuturesUnordered};
 use ruma::{
 	OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
 	api::{
-		client::{
-			error::ErrorKind,
-			keys::{
-				claim_keys, get_key_changes, get_keys, upload_keys,
-				upload_signatures::{self},
-				upload_signing_keys,
-			},
+		client::keys::{
+			claim_keys, get_key_changes, get_keys, upload_keys,
+			upload_signatures::{self},
+			upload_signing_keys,
 		},
 		federation,
 	},
+	assign,
 	encryption::CrossSigningKey,
 	serde::Raw,
 };
@@ -115,12 +113,12 @@ pub(crate) async fn upload_keys_route(
 		}
 	}

-	Ok(upload_keys::v3::Response {
-		one_time_key_counts: services
-			.users
-			.count_one_time_keys(sender_user, sender_device)
-			.await,
-	})
+	let one_time_key_counts = services
+		.users
+		.count_one_time_keys(sender_user, sender_device)
+		.await;
+
+	Ok(upload_keys::v3::Response::new(one_time_key_counts))
 }

 /// # `POST /_matrix/client/r0/keys/query`
@@ -174,7 +172,7 @@ pub(crate) async fn upload_signing_keys_route(
 ) -> Result<upload_signing_keys::v3::Response> {
 	let sender_user = body.sender_user();

-	match check_for_new_keys(
+	if uiaa_needed_to_upload_keys(
 		services,
 		sender_user,
 		body.self_signing_key.as_ref(),
@@ -182,25 +180,11 @@ pub(crate) async fn upload_signing_keys_route(
 		body.master_key.as_ref(),
 	)
 	.await
-	.inspect_err(|e| debug!(?e))
 	{
-		| Ok(exists) => {
-			if let Some(result) = exists {
-				// No-op, they tried to reupload the same set of keys
-				// (lost connection for example)
-				return Ok(result);
-			}
-			debug!(
-				"Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"
-			);
-			// Some of the keys weren't found, so we let them upload
-		},
-		| _ => {
-			let _ = services
-				.uiaa
-				.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
-				.await?;
-		},
+		let _ = services
+			.uiaa
+			.authenticate_password(&body.auth, Some(Identity::from_user_id(sender_user)))
+			.await?;
 	}

 	services
@@ -214,77 +198,56 @@ pub(crate) async fn upload_signing_keys_route(
 		)
 		.await?;

-	Ok(upload_signing_keys::v3::Response {})
+	Ok(upload_signing_keys::v3::Response::new())
 }

-async fn check_for_new_keys(
+async fn uiaa_needed_to_upload_keys(
 	services: crate::State,
 	user_id: &UserId,
 	self_signing_key: Option<&Raw<CrossSigningKey>>,
 	user_signing_key: Option<&Raw<CrossSigningKey>>,
 	master_signing_key: Option<&Raw<CrossSigningKey>>,
-) -> Result<Option<upload_signing_keys::v3::Response>> {
-	debug!("checking for existing keys");
-	let mut empty = false;
-	if let Some(master_signing_key) = master_signing_key {
-		let (key, value) = parse_master_key(user_id, master_signing_key)?;
-		let result = services
-			.users
-			.get_master_key(None, user_id, &|_| true)
-			.await;
-		if result.is_not_found() {
-			empty = true;
-		} else {
-			let existing_master_key = result?;
-			let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?;
-			if existing_key != key || existing_value != value {
-				return Err!(Request(Forbidden(
-					"Tried to change an existing master key, UIA required"
-				)));
-			}
-		}
-	}
-	if let Some(user_signing_key) = user_signing_key {
-		let key = services.users.get_user_signing_key(user_id).await;
-		if key.is_not_found() && !empty {
-			return Err!(Request(Forbidden(
-				"Tried to update an existing user signing key, UIA required"
-			)));
-		}
-		if !key.is_not_found() {
-			let existing_signing_key = key?.deserialize()?;
-			if existing_signing_key != user_signing_key.deserialize()? {
-				return Err!(Request(Forbidden(
-					"Tried to change an existing user signing key, UIA required"
-				)));
-			}
-		}
-	}
-	if let Some(self_signing_key) = self_signing_key {
-		let key = services
-			.users
-			.get_self_signing_key(None, user_id, &|_| true)
-			.await;
-		if key.is_not_found() && !empty {
-			debug!(?key);
-			return Err!(Request(Forbidden(
-				"Tried to add a new signing key independently from the master key"
-			)));
-		}
-		if !key.is_not_found() {
-			let existing_signing_key = key?.deserialize()?;
-			if existing_signing_key != self_signing_key.deserialize()? {
-				return Err!(Request(Forbidden(
-					"Tried to update an existing self signing key, UIA required"
-				)));
-			}
-		}
-	}
-	if empty {
-		return Ok(None);
-	}
-
-	Ok(Some(upload_signing_keys::v3::Response {}))
+) -> bool {
+	let (self_signing_key, user_signing_key, master_signing_key) = (
+		self_signing_key.map(Raw::deserialize).flat_ok(),
+		user_signing_key.map(Raw::deserialize).flat_ok(),
+		master_signing_key.map(Raw::deserialize).flat_ok(),
+	);
+
+	let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = futures::join!(
+		services
+			.users
+			.get_self_signing_key(None, user_id, &|_| true)
+			.ok(),
+		services.users.get_user_signing_key(user_id).ok(),
+		services.users.get_master_key(None, user_id, &|_| true).ok(),
+	);
+
+	let (existing_self_signing_key, existing_user_signing_key, existing_master_signing_key) = (
+		existing_self_signing_key
+			.as_ref()
+			.map(Raw::deserialize)
+			.flat_ok(),
+		existing_user_signing_key
+			.as_ref()
+			.map(Raw::deserialize)
+			.flat_ok(),
+		existing_master_signing_key
+			.as_ref()
+			.map(Raw::deserialize)
+			.flat_ok(),
+	);
+
+	if let Some(existing_master_signing_key) = existing_master_signing_key {
+		// If a master key exists, UIAA is required if any of the keys are different.
+		master_signing_key != Some(existing_master_signing_key)
+			|| user_signing_key != existing_user_signing_key
+			|| self_signing_key != existing_self_signing_key
+	} else {
+		// If no master key exists, UIAA is not required.
+		false
+	}
 }

 /// # `POST /_matrix/client/r0/keys/signatures/upload`
@@ -343,7 +306,7 @@ pub(crate) async fn upload_signatures_route(
 		}
 	}

-	Ok(upload_signatures::v3::Response { failures: BTreeMap::new() })
+	Ok(upload_signatures::v3::Response::new())
 }

 /// # `POST /_matrix/client/r0/keys/changes`
@@ -363,18 +326,17 @@ pub(crate) async fn get_key_changes_route(
 	let from = body
 		.from
 		.parse()
-		.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?;
+		.map_err(|_| err!(Request(InvalidParam("Invalid `from`."))))?;

 	let to = body
 		.to
 		.parse()
-		.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?;
+		.map_err(|_| err!(Request(InvalidParam("Invalid `to`."))))?;

 	device_list_updates.extend(
 		services
 			.users
 			.keys_changed(sender_user, Some(from), Some(to))
-			.map(ToOwned::to_owned)
 			.collect::<Vec<_>>()
 			.await,
 	);
@@ -385,18 +347,18 @@ pub(crate) async fn get_key_changes_route(
 		device_list_updates.extend(
 			services
 				.users
-				.room_keys_changed(room_id, Some(from), Some(to))
+				.room_keys_changed(&room_id, Some(from), Some(to))
 				.map(|(user_id, _)| user_id)
-				.map(ToOwned::to_owned)
 				.collect::<Vec<_>>()
 				.await,
 		);
 	}

-	Ok(get_key_changes::v3::Response {
-		changed: device_list_updates.into_iter().collect(),
-		left: Vec::new(), // TODO
-	})
+	Ok(get_key_changes::v3::Response::new(
+		device_list_updates.into_iter().collect(),
+		// TODO
+		vec![],
+	))
 }

 pub(crate) async fn get_keys_helper<F>(
@@ -433,10 +395,10 @@ pub(crate) async fn get_keys_helper<F>(
 		let mut devices = services.users.all_device_ids(user_id).boxed();

 		while let Some(device_id) = devices.next().await {
-			if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await {
+			if let Ok(mut keys) = services.users.get_device_keys(user_id, &device_id).await {
 				let metadata = services
 					.users
-					.get_device_metadata(user_id, device_id)
+					.get_device_metadata(user_id, &device_id)
 					.await
 					.map_err(|_| {
 						err!(Database("all_device_keys contained nonexistent device."))
@@ -445,7 +407,7 @@ pub(crate) async fn get_keys_helper<F>(
 				add_unsigned_device_display_name(&mut keys, metadata, include_display_names)
 					.map_err(|_| err!(Database("invalid device keys in database")))?;

-				container.insert(device_id.to_owned(), keys);
+				container.insert(device_id.clone(), keys);
 			}
 		}
@@ -506,8 +468,7 @@ pub(crate) async fn get_keys_helper<F>(
 			device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
 		}

-		let request =
-			federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
+		let request = federation::keys::get_keys::v1::Request::new(device_keys_input_fed);

 		let response = tokio::time::timeout(
 			timeout,
 			services.sending.send_federation_request(server, request),
@@ -561,13 +522,13 @@ pub(crate) async fn get_keys_helper<F>(
 		}
 	}

-	Ok(get_keys::v3::Response {
+	Ok(assign!(get_keys::v3::Response::new(), {
 		failures,
 		device_keys,
 		master_keys,
 		self_signing_keys,
 		user_signing_keys,
-	})
+	}))
 }

 fn add_unsigned_device_display_name(
@@ -576,7 +537,8 @@ fn add_unsigned_device_display_name(
 	include_display_names: bool,
 ) -> serde_json::Result<()> {
 	if let Some(display_name) = metadata.display_name {
-		let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
+		let mut object =
+			keys.deserialize_as_unchecked::<serde_json::Map<String, serde_json::Value>>()?;

 		let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
 		if let serde_json::Value::Object(unsigned_object) = unsigned {
@@ -642,9 +604,7 @@ pub(crate) async fn claim_keys_helper(
 			timeout,
 			services.sending.send_federation_request(
 				server,
-				federation::keys::claim_keys::v1::Request {
-					one_time_keys: one_time_keys_input_fed,
-				},
+				federation::keys::claim_keys::v1::Request::new(one_time_keys_input_fed),
 			),
 		)
 		.await
@@ -667,5 +627,5 @@ pub(crate) async fn claim_keys_helper(
 		}
 	}

-	Ok(claim_keys::v3::Response { failures, one_time_keys })
+	Ok(assign!(claim_keys::v3::Response::new(one_time_keys), { failures: failures }))
 }
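uiaa_needed_to_upload_keys reduces the old multi-branch check to one rule: with a master key on record, any differing submitted key demands UIAA; with none, the upload counts as initial setup (MSC3967) and passes without it. A self-contained sketch of that decision (a toy key type, not the real CrossSigningKey):

	#[derive(PartialEq, Clone, Copy)]
	struct Key(&'static str);

	fn uiaa_needed(
		submitted: (Option<Key>, Option<Key>, Option<Key>),
		existing: (Option<Key>, Option<Key>, Option<Key>),
	) -> bool {
		let (master, user, self_) = submitted;
		let (ex_master, ex_user, ex_self) = existing;
		match ex_master {
			// A master key exists: UIAA unless everything matches storage.
			| Some(ex) => master != Some(ex) || user != ex_user || self_ != ex_self,
			// No master key: first-time upload, no UIAA.
			| None => false,
		}
	}

	fn main() {
		let k = Key("a");
		assert!(!uiaa_needed((Some(k), None, None), (None, None, None)));
		assert!(uiaa_needed((Some(Key("b")), None, None), (Some(k), None, None)));
	}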
+42 -87
@@ -1,7 +1,7 @@
 use std::time::Duration;

 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
 	Err, Result, err,
 	utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
@@ -9,11 +9,11 @@
 use conduwuit_core::error;
 use conduwuit_service::{
 	Services,
-	media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
+	media::{Dim, FileMeta, MXC_LENGTH},
 };
 use reqwest::Url;
 use ruma::{
-	Mxc, UserId,
+	UserId,
 	api::client::{
 		authenticated_media::{
 			get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
@@ -21,7 +21,9 @@
 		},
 		media::create_content,
 	},
+	assign,
 };
+use service::media::mxc::Mxc;

 use crate::Ruma;
@@ -30,9 +32,9 @@ pub(crate) async fn get_media_config_route(
 	State(services): State<crate::State>,
 	_body: Ruma<get_media_config::v1::Request>,
 ) -> Result<get_media_config::v1::Response> {
-	Ok(get_media_config::v1::Response {
-		upload_size: ruma_from_usize(services.server.config.max_request_size),
-	})
+	Ok(get_media_config::v1::Response::new(ruma_from_usize(
+		services.server.config.max_request_size,
+	)))
 }

 /// # `POST /_matrix/media/v3/upload`
@@ -49,7 +51,7 @@ pub(crate) async fn get_media_config_route(
 )]
 pub(crate) async fn create_content_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<create_content::v3::Request>,
 ) -> Result<create_content::v3::Response> {
 	let user = body.sender_user();
@@ -82,10 +84,9 @@ pub(crate) async fn create_content_route(
 			.flatten()
 	});

-	Ok(create_content::v3::Response {
-		content_uri: mxc.to_string().into(),
+	Ok(assign!(create_content::v3::Response::new(mxc.to_string().into()), {
 		blurhash: blurhash.flatten(),
-	})
+	}))
 }

 /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
@@ -99,7 +100,7 @@ pub(crate) async fn create_content_route(
 )]
 pub(crate) async fn get_content_thumbnail_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_content_thumbnail::v1::Request>,
 ) -> Result<get_content_thumbnail::v1::Response> {
 	let user = body.sender_user();
@@ -114,7 +115,7 @@ pub(crate) async fn get_content_thumbnail_route(
 		content,
 		content_type,
 		content_disposition,
-	} = match fetch_thumbnail(&services, &mxc, user, body.timeout_ms, &dim).await {
+	} = match fetch_thumbnail_meta(&services, &mxc, user, body.timeout_ms, &dim).await {
 		| Ok(meta) => meta,
 		| Err(conduwuit::Error::Io(e)) => match e.kind() {
 			| std::io::ErrorKind::NotFound =>
@@ -128,13 +129,14 @@ pub(crate) async fn get_content_thumbnail_route(
 		| Err(_) => return Err!(Request(Unknown("Unknown error when fetching thumbnail."))),
 	};

-	Ok(get_content_thumbnail::v1::Response {
-		file: content.expect("entire file contents"),
-		content_type: content_type.map(Into::into),
-		cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-		cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+	let content_disposition =
+		make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
+
+	Ok(get_content_thumbnail::v1::Response::new(
+		content.expect("entire file contents"),
+		content_type.unwrap_or_default(),
 		content_disposition,
-	})
+	))
 }

 /// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
@@ -148,7 +150,7 @@ pub(crate) async fn get_content_thumbnail_route(
 )]
 pub(crate) async fn get_content_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_content::v1::Request>,
 ) -> Result<get_content::v1::Response> {
 	let user = body.sender_user();
@@ -161,7 +163,7 @@ pub(crate) async fn get_content_route(
 		content,
 		content_type,
 		content_disposition,
-	} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
+	} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
 		| Ok(meta) => meta,
 		| Err(conduwuit::Error::Io(e)) => match e.kind() {
 			| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
@@ -174,13 +176,14 @@ pub(crate) async fn get_content_route(
 		| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
 	};

-	Ok(get_content::v1::Response {
-		file: content.expect("entire file contents"),
-		content_type: content_type.map(Into::into),
-		cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-		cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+	let content_disposition =
+		make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
+
+	Ok(get_content::v1::Response::new(
+		content.expect("entire file contents"),
+		content_type.unwrap_or_default(),
 		content_disposition,
-	})
+	))
 }

 /// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
@@ -194,7 +197,7 @@ pub(crate) async fn get_content_route(
 )]
 pub(crate) async fn get_content_as_filename_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_content_as_filename::v1::Request>,
 ) -> Result<get_content_as_filename::v1::Response> {
 	let user = body.sender_user();
@@ -208,7 +211,7 @@ pub(crate) async fn get_content_as_filename_route(
 		content,
 		content_type,
 		content_disposition,
-	} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
+	} = match fetch_file_meta(&services, &mxc, user, body.timeout_ms).await {
 		| Ok(meta) => meta,
 		| Err(conduwuit::Error::Io(e)) => match e.kind() {
 			| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
@@ -221,13 +224,17 @@ pub(crate) async fn get_content_as_filename_route(
 		| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
 	};

-	Ok(get_content_as_filename::v1::Response {
-		file: content.expect("entire file contents"),
-		content_type: content_type.map(Into::into),
-		cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-		cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+	let content_disposition = make_content_disposition(
+		content_disposition.as_ref(),
+		content_type.as_deref(),
+		Some(&body.filename),
+	);
+
+	Ok(get_content_as_filename::v1::Response::new(
+		content.expect("entire file contents"),
+		content_type.unwrap_or_default(),
 		content_disposition,
-	})
+	))
 }

 /// # `GET /_matrix/client/v1/media/preview_url`
@@ -241,7 +248,7 @@ pub(crate) async fn get_content_as_filename_route(
 )]
 pub(crate) async fn get_media_preview_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<get_media_preview::v1::Request>,
 ) -> Result<get_media_preview::v1::Response> {
 	let sender_user = body.sender_user();
@@ -278,58 +285,6 @@ pub(crate) async fn get_media_preview_route(
 	})
 }

-async fn fetch_thumbnail(
-	services: &Services,
-	mxc: &Mxc<'_>,
-	user: &UserId,
-	timeout_ms: Duration,
-	dim: &Dim,
-) -> Result<FileMeta> {
-	let FileMeta {
-		content,
-		content_type,
-		content_disposition,
-	} = fetch_thumbnail_meta(services, mxc, user, timeout_ms, dim).await?;
-
-	let content_disposition = Some(make_content_disposition(
-		content_disposition.as_ref(),
-		content_type.as_deref(),
-		None,
-	));
-
-	Ok(FileMeta {
-		content,
-		content_type,
-		content_disposition,
-	})
-}
-
-async fn fetch_file(
-	services: &Services,
-	mxc: &Mxc<'_>,
-	user: &UserId,
-	timeout_ms: Duration,
-	filename: Option<&str>,
-) -> Result<FileMeta> {
-	let FileMeta {
-		content,
-		content_type,
-		content_disposition,
-	} = fetch_file_meta(services, mxc, user, timeout_ms).await?;
-
-	let content_disposition = Some(make_content_disposition(
-		content_disposition.as_ref(),
-		content_type.as_deref(),
-		filename,
-	));
-
-	Ok(FileMeta {
-		content,
-		content_type,
-		content_disposition,
-	})
-}
-
 async fn fetch_thumbnail_meta(
 	services: &Services,
 	mxc: &Mxc<'_>,
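The dedicated fetch_thumbnail/fetch_file wrappers are folded away; the content disposition is now computed inline and the _meta variants are called directly, with missing files still surfacing as std::io::ErrorKind::NotFound. The error translation in isolation (self-contained sketch, stand-in messages taken from the handlers above):

	use std::io::{Error, ErrorKind};

	fn map_fetch_error(e: &Error) -> &'static str {
		match e.kind() {
			// A missing media object becomes a client-facing 404.
			| ErrorKind::NotFound => "Media not found.",
			| _ => "Unknown error when fetching file.",
		}
	}

	fn main() {
		let missing = Error::from(ErrorKind::NotFound);
		assert_eq!(map_fetch_error(&missing), "Media not found.");
	}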
+76 -58
@@ -1,20 +1,21 @@
 #![allow(deprecated)]
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
     Err, Result, err,
     utils::{content_disposition::make_content_disposition, math::ruma_from_usize},
 };
-use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta};
+use conduwuit_service::media::{CORP_CROSS_ORIGIN, Dim, FileMeta};
 use reqwest::Url;
 use ruma::{
-    Mxc,
     api::client::media::{
         create_content, get_content, get_content_as_filename, get_content_thumbnail,
         get_media_config, get_media_preview,
     },
+    assign,
 };
+use service::media::mxc::Mxc;
 use crate::{Ruma, RumaResponse, client::create_content_route};
@@ -25,9 +26,9 @@ pub(crate) async fn get_media_config_legacy_route(
     State(services): State<crate::State>,
     _body: Ruma<get_media_config::v3::Request>,
 ) -> Result<get_media_config::v3::Response> {
-    Ok(get_media_config::v3::Response {
-        upload_size: ruma_from_usize(services.server.config.max_request_size),
-    })
+    Ok(get_media_config::v3::Response::new(ruma_from_usize(
+        services.server.config.max_request_size,
+    )))
 }
 /// # `GET /_matrix/media/v1/config`
@@ -52,7 +53,7 @@ pub(crate) async fn get_media_config_legacy_legacy_route(
 #[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy", level = "debug")]
 pub(crate) async fn get_media_preview_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_media_preview::v3::Request>,
 ) -> Result<get_media_preview::v3::Response> {
     let sender_user = body.sender_user();
@@ -94,10 +95,10 @@ pub(crate) async fn get_media_preview_legacy_route(
 /// Returns URL preview.
 pub(crate) async fn get_media_preview_legacy_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_media_preview::v3::Request>,
 ) -> Result<RumaResponse<get_media_preview::v3::Response>> {
-    get_media_preview_legacy_route(State(services), InsecureClientIp(client), body)
+    get_media_preview_legacy_route(State(services), ClientIp(client), body)
         .await
         .map(RumaResponse)
 }
@@ -114,10 +115,10 @@ pub(crate) async fn get_media_preview_legacy_legacy_route(
 /// - Media will be saved in the media/ directory
 pub(crate) async fn create_content_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<create_content::v3::Request>,
 ) -> Result<RumaResponse<create_content::v3::Response>> {
-    create_content_route(State(services), InsecureClientIp(client), body)
+    create_content_route(State(services), ClientIp(client), body)
         .await
         .map(RumaResponse)
 }
@@ -133,7 +134,7 @@ pub(crate) async fn create_content_legacy_route(
 #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
 pub(crate) async fn get_content_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content::v3::Request>,
 ) -> Result<get_content::v3::Response> {
     let mxc = Mxc {
@@ -153,13 +154,16 @@ pub(crate) async fn get_content_legacy_route(
             None,
         );
-        Ok(get_content::v3::Response {
-            file: content.expect("entire file contents"),
-            content_type: content_type.map(Into::into),
-            content_disposition: Some(content_disposition),
+        Ok(assign!(
+            get_content::v3::Response::new(
+                content.expect("entire file contents"),
+                content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
                 cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-        })
+            }
+        ))
     },
     | _ =>
         if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -177,13 +181,16 @@ pub(crate) async fn get_content_legacy_route(
             None,
         );
-        Ok(get_content::v3::Response {
-            file: response.file,
-            content_type: response.content_type,
-            content_disposition: Some(content_disposition),
+        Ok(assign!(
+            get_content::v3::Response::new(
+                response.file,
+                response.content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
                 cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-        })
+            }
+        ))
         } else {
             Err!(Request(NotFound("Media not found.")))
         },
@@ -205,10 +212,10 @@ pub(crate) async fn get_content_legacy_route(
 #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
 pub(crate) async fn get_content_legacy_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content::v3::Request>,
 ) -> Result<RumaResponse<get_content::v3::Response>> {
-    get_content_legacy_route(State(services), InsecureClientIp(client), body)
+    get_content_legacy_route(State(services), ClientIp(client), body)
         .await
         .map(RumaResponse)
 }
@@ -224,7 +231,7 @@ pub(crate) async fn get_content_legacy_legacy_route(
 #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
 pub(crate) async fn get_content_as_filename_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content_as_filename::v3::Request>,
 ) -> Result<get_content_as_filename::v3::Response> {
     let mxc = Mxc {
@@ -244,13 +251,15 @@ pub(crate) async fn get_content_as_filename_legacy_route(
             Some(&body.filename),
         );
-        Ok(get_content_as_filename::v3::Response {
-            file: content.expect("entire file contents"),
-            content_type: content_type.map(Into::into),
-            content_disposition: Some(content_disposition),
+        Ok(assign!(get_content_as_filename::v3::Response::new(
+                content.expect("entire file contents"),
+                content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
                 cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-        })
+            }
+        ))
     },
     | _ =>
         if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -268,13 +277,16 @@ pub(crate) async fn get_content_as_filename_legacy_route(
             None,
         );
-        Ok(get_content_as_filename::v3::Response {
-            content_disposition: Some(content_disposition),
-            content_type: response.content_type,
-            file: response.file,
+        Ok(assign!(
+            get_content_as_filename::v3::Response::new(
+                response.file,
+                response.content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
                 cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-        })
+            }
+        ))
         } else {
             Err!(Request(NotFound("Media not found.")))
         },
@@ -295,10 +307,10 @@ pub(crate) async fn get_content_as_filename_legacy_route(
 /// seconds
 pub(crate) async fn get_content_as_filename_legacy_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content_as_filename::v3::Request>,
 ) -> Result<RumaResponse<get_content_as_filename::v3::Response>> {
-    get_content_as_filename_legacy_route(State(services), InsecureClientIp(client), body)
+    get_content_as_filename_legacy_route(State(services), ClientIp(client), body)
         .await
         .map(RumaResponse)
 }
@@ -314,7 +326,7 @@ pub(crate) async fn get_content_as_filename_legacy_legacy_route(
 #[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy", level = "debug")]
 pub(crate) async fn get_content_thumbnail_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content_thumbnail::v3::Request>,
 ) -> Result<get_content_thumbnail::v3::Response> {
     let mxc = Mxc {
@@ -335,13 +347,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
             None,
         );
-        Ok(get_content_thumbnail::v3::Response {
-            file: content.expect("entire file contents"),
-            content_type: content_type.map(Into::into),
-            cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-            content_disposition: Some(content_disposition),
-        })
+        Ok(assign!(
+            get_content_thumbnail::v3::Response::new(
+                content.expect("entire file contents"),
+                content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
+                cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
+            }
+        ))
     },
     | _ =>
         if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
@@ -359,13 +374,16 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
             None,
         );
-        Ok(get_content_thumbnail::v3::Response {
-            file: response.file,
-            content_type: response.content_type,
-            cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
-            cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
-            content_disposition: Some(content_disposition),
-        })
+        Ok(assign!(
+            get_content_thumbnail::v3::Response::new(
+                response.file,
+                response.content_type.unwrap_or_default(),
+                content_disposition,
+            ),
+            {
+                cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()),
+            }
+        ))
         } else {
             Err!(Request(NotFound("Media not found.")))
         },
@@ -386,10 +404,10 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
 /// seconds
 pub(crate) async fn get_content_thumbnail_legacy_legacy_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<get_content_thumbnail::v3::Request>,
 ) -> Result<RumaResponse<get_content_thumbnail::v3::Response>> {
-    get_content_thumbnail_legacy_route(State(services), InsecureClientIp(client), body)
+    get_content_thumbnail_legacy_route(State(services), ClientIp(client), body)
         .await
         .map(RumaResponse)
 }
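Note on the response construction above: the struct literals were replaced by each type's `new(...)` constructor for required fields, with ruma's re-exported `assign!` macro filling in the optional ones, presumably because the upstream response types gained required-argument constructors. A minimal sketch of what `assign!` does, using a local stand-in struct rather than the real ruma type:

// Sketch only: `Response` is a stand-in, not the actual ruma response type.
use assign::assign; // ruma re-exports this macro as `ruma::assign`

#[derive(Debug)]
struct Response {
    file: Vec<u8>,
    content_type: String,
    cross_origin_resource_policy: Option<String>,
}

impl Response {
    // Required fields go through the constructor...
    fn new(file: Vec<u8>, content_type: String) -> Self {
        Self { file, content_type, cross_origin_resource_policy: None }
    }
}

fn main() {
    // ...and optional fields are set afterwards by `assign!`, which takes
    // ownership of the value, mutates the named fields, and returns it.
    let res = assign!(Response::new(b"img".to_vec(), "image/png".into()), {
        cross_origin_resource_policy: Some("cross-origin".into()),
    });
    println!("{res:?}");
}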
+7 -17
@@ -1,7 +1,8 @@
 use axum::extract::State;
-use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
+use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
 use ruma::{
     api::client::membership::ban_user,
+    assign,
     events::room::member::{MembershipState, RoomMemberEventContent},
 };
@@ -24,30 +25,19 @@ pub(crate) async fn ban_user_route(
         return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
     }
-    let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
+    let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
-    let current_member_content = services
-        .rooms
-        .state_accessor
-        .get_member(&body.room_id, &body.user_id)
-        .await
-        .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban));
     services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
-                membership: MembershipState::Ban,
+            PartialPdu::state(
+                body.user_id.to_string(),
+                &assign!(RoomMemberEventContent::new(MembershipState::Ban), {
                     reason: body.reason.clone(),
-                displayname: None, // display name may be offensive
-                avatar_url: None, // avatar may be offensive
-                is_direct: None,
-                join_authorized_via_users_server: None,
-                third_party_invite: None,
                     redact_events: body.redact_events,
-                ..current_member_content
                 }),
+            ),
             sender_user,
             Some(&body.room_id),
             &state_lock,
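The ban handler no longer fetches and spreads the target's current member content; it builds the ban event from a fresh value. A hedged sketch of why, with a stand-in type rather than the real RoomMemberEventContent:

// Sketch only: `MemberContent` stands in for RoomMemberEventContent.
#[derive(Clone, Debug, Default)]
struct MemberContent {
    membership: String,
    displayname: Option<String>, // potentially offensive; must not be copied
    avatar_url: Option<String>,  // potentially offensive; must not be copied
    reason: Option<String>,
}

fn ban_content(reason: Option<String>) -> MemberContent {
    // Starting from a fresh default (instead of `..current_member_content`)
    // guarantees no stale or abusive profile fields leak into the ban event,
    // and the long list of explicit `None` overrides becomes unnecessary.
    MemberContent {
        membership: "ban".to_owned(),
        reason,
        ..MemberContent::default()
    }
}

fn main() {
    let content = ban_content(Some("spam".to_owned()));
    assert!(content.displayname.is_none() && content.avatar_url.is_none());
    println!("{content:?}");
}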
+52 -39
@@ -1,19 +1,20 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
     Err, Result, debug_error, err, info,
-    matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
+    matrix::{event::gen_event_id_canonical_json, pdu::PartialPdu},
     warn,
 };
 use futures::FutureExt;
 use ruma::{
     RoomId, UserId,
-    api::{client::membership::invite_user, federation::membership::create_invite},
-    events::{
-        invite_permission_config::FilterLevel,
-        room::member::{MembershipState, RoomMemberEventContent},
+    api::{
+        client::membership::invite_user::{self, v3::InviteUserId},
+        federation::membership::create_invite,
     },
+    events::room::member::{MembershipState, RoomMemberEventContent},
 };
+use ruminuwuity::invite_permission_config::FilterLevel;
 use service::Services;
 use super::banned_room_check;
@@ -25,7 +26,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "invite", level = "info")]
 pub(crate) async fn invite_user_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<invite_user::v3::Request>,
 ) -> Result<invite_user::v3::Response> {
     let sender_user = body.sender_user();
@@ -51,7 +52,11 @@ pub(crate) async fn invite_user_route(
     .await?;
     match &body.recipient {
-        | invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
+        | invite_user::v3::InvitationRecipient::UserId(InviteUserId {
+            user_id: recipient_user,
+            reason,
+            ..
+        }) => {
             let sender_filter_level = services
                 .users
                 .invite_filter_level(recipient_user, sender_user)
@@ -59,7 +64,7 @@ pub(crate) async fn invite_user_route(
             if !matches!(sender_filter_level, FilterLevel::Allow) {
                 // drop invites if the sender has the recipient filtered
-                return Ok(invite_user::v3::Response {});
+                return Ok(invite_user::v3::Response::new());
             }
             if let Ok(target_user_membership) = services
@@ -95,13 +100,13 @@ pub(crate) async fn invite_user_route(
                 sender_user,
                 recipient_user,
                 &body.room_id,
-                body.reason.clone(),
+                reason.clone(),
                 false,
             )
             .boxed()
             .await?;
-            Ok(invite_user::v3::Response {})
+            Ok(invite_user::v3::Response::new())
         },
         | _ => {
             Err!(Request(NotFound("User not found.")))
@@ -141,25 +146,28 @@ pub(crate) async fn invite_helper(
     let (pdu, pdu_json, invite_room_state) = {
         let state_lock = services.rooms.state.mutex.lock(room_id).await;
-        let content = RoomMemberEventContent {
-            avatar_url: services.users.avatar_url(recipient_user).await.ok(),
-            is_direct: Some(is_direct),
-            reason,
-            ..RoomMemberEventContent::new(MembershipState::Invite)
-        };
+        let mut content = RoomMemberEventContent::new(MembershipState::Invite);
+        content.displayname = services.users.displayname(recipient_user).await.ok();
+        content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
+        content.is_direct = Some(is_direct);
+        content.reason = reason;
         let (pdu, pdu_json) = services
             .rooms
             .timeline
             .create_hash_and_sign_event(
-                PduBuilder::state(recipient_user.to_string(), &content),
+                PartialPdu::state(recipient_user.to_string(), &content),
                 sender_user,
                 Some(room_id),
                 &state_lock,
             )
             .await?;
-        let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
+        let invite_room_state = services
+            .rooms
+            .state
+            .summary_stripped(&pdu, room_id, recipient_user)
+            .await;
         drop(state_lock);
@@ -168,29 +176,36 @@ pub(crate) async fn invite_helper(
     let room_version_id = services.rooms.state.get_room_version(room_id).await?;
-    let response = services
-        .sending
-        .send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
-            room_id: room_id.to_owned(),
-            event_id: (*pdu.event_id).to_owned(),
-            room_version: room_version_id.clone(),
-            event: services
+    let mut request = create_invite::v2::Request::new(
+        room_id.to_owned(),
+        (*pdu.event_id).to_owned(),
+        room_version_id.clone(),
+        services
             .sending
             .convert_to_outgoing_federation_event(pdu_json.clone())
             .await,
         invite_room_state,
-            via: services
+    );
+    request.via = services
         .rooms
         .state_cache
         .servers_route_via(room_id)
         .await
-        .ok(),
-        })
+        .ok();
+    let response = services
+        .sending
+        .send_federation_request(recipient_user.server_name(), request)
         .await?;
     // We do not add the event_id field to the pdu here because of signature and
     // hashes checks
-    let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id)
+    let (event_id, value) = gen_event_id_canonical_json(
+        &response.event,
+        &room_version_id
+            .rules()
+            .expect("room version should have defined rules"),
+    )
     .map_err(|e| {
         err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
     })?;
@@ -229,20 +244,18 @@ pub(crate) async fn invite_helper(
     let state_lock = services.rooms.state.mutex.lock(room_id).await;
-    let content = RoomMemberEventContent {
-        displayname: services.users.displayname(recipient_user).await.ok(),
-        avatar_url: services.users.avatar_url(recipient_user).await.ok(),
-        blurhash: services.users.blurhash(recipient_user).await.ok(),
-        is_direct: Some(is_direct),
-        reason,
-        ..RoomMemberEventContent::new(MembershipState::Invite)
-    };
+    let mut content = RoomMemberEventContent::new(MembershipState::Invite);
+    content.displayname = services.users.displayname(recipient_user).await.ok();
+    content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
+    content.blurhash = services.users.blurhash(recipient_user).await.ok();
+    content.is_direct = Some(is_direct);
+    content.reason = reason;
     services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder::state(recipient_user.to_string(), &content),
+            PartialPdu::state(recipient_user.to_string(), &content),
             sender_user,
             Some(room_id),
             &state_lock,
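A recurring change in this file (and the join/knock/leave handlers below) is dropping struct-update syntax (`..RoomMemberEventContent::new(...)`) in favour of constructing the value and mutating fields. A plausible reason is that functional record update cannot be used on a `#[non_exhaustive]` struct from outside its defining crate; the sketch below only mirrors the shape of the new code with a local stand-in module (inside one crate the literal still compiles, so treat this as illustrative):

mod member {
    // Stand-in for the upstream event-content type; the real one lives in ruma.
    #[non_exhaustive]
    #[derive(Debug, Default)]
    pub struct RoomMemberEventContent {
        pub membership: String,
        pub displayname: Option<String>,
        pub is_direct: Option<bool>,
    }

    impl RoomMemberEventContent {
        pub fn new(membership: &str) -> Self {
            Self { membership: membership.to_owned(), ..Default::default() }
        }
    }
}

fn build_invite_content() -> member::RoomMemberEventContent {
    // From a downstream crate, `member::RoomMemberEventContent { .. }` would
    // not compile against a non-exhaustive struct; mutating the constructor
    // result is the portable pattern the diff adopts.
    let mut content = member::RoomMemberEventContent::new("invite");
    content.displayname = Some("Alice".to_owned());
    content.is_direct = Some(true);
    content
}

fn main() {
    println!("{:?}", build_invite_content());
}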
+61 -73
@@ -1,13 +1,13 @@
 use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
     Err, Result, debug, debug_info, debug_warn, err, error, info, is_true,
     matrix::{
         StateKey,
         event::{gen_event_id, gen_event_id_canonical_json},
-        pdu::{PduBuilder, PduEvent},
+        pdu::{PartialPdu, PduEvent},
         state_res,
     },
     result::FlatOk,
@@ -24,10 +24,8 @@
     CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
     RoomVersionId, UserId,
     api::{
-        client::{
-            error::ErrorKind,
-            membership::{join_room_by_id, join_room_by_id_or_alias},
-        },
+        client::membership::{join_room_by_id, join_room_by_id_or_alias},
+        error::{ErrorKind, IncompatibleRoomVersionErrorData},
         federation::{self},
     },
     canonical_json::to_canonical_value,
@@ -67,7 +65,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "join", level = "info")]
 pub(crate) async fn join_room_by_id_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<join_room_by_id::v3::Request>,
 ) -> Result<join_room_by_id::v3::Response> {
     let sender_user = body.sender_user();
@@ -89,7 +87,6 @@ pub(crate) async fn join_room_by_id_route(
     .rooms
     .state_cache
     .servers_invite_via(&body.room_id)
-    .map(ToOwned::to_owned)
     .collect()
     .await;
@@ -139,7 +136,7 @@ pub(crate) async fn join_room_by_id_route(
 #[tracing::instrument(skip_all, fields(%client), name = "join", level = "info")]
 pub(crate) async fn join_room_by_id_or_alias_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<join_room_by_id_or_alias::v3::Request>,
 ) -> Result<join_room_by_id_or_alias::v3::Response> {
     let sender_user = body.sender_user();
@@ -169,7 +166,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
     .rooms
     .state_cache
     .servers_invite_via(&room_id)
-    .map(ToOwned::to_owned)
     .collect::<Vec<_>>()
     .await,
 );
@@ -210,11 +206,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
     )
     .await?;
-    let addl_via_servers = services
-        .rooms
-        .state_cache
-        .servers_invite_via(&room_id)
-        .map(ToOwned::to_owned);
+    let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
     let addl_state_servers = services
         .rooms
@@ -227,7 +219,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
     .iter()
     .map(|event| event.get_field("sender"))
     .filter_map(FlatOk::flat_ok)
-    .map(|user: &UserId| user.server_name().to_owned())
+    .map(|user: OwnedUserId| user.server_name().to_owned())
     .stream()
     .chain(addl_via_servers)
     .collect()
@@ -254,7 +246,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
     .boxed()
     .await?;
-    Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id })
+    Ok(join_room_by_id_or_alias::v3::Response::new(join_room_response.room_id))
 }
 pub async fn join_room_by_id_helper(
@@ -285,7 +277,7 @@ pub async fn join_room_by_id_helper(
     .await
     {
         debug_warn!("{sender_user} is already joined in {room_id}");
-        return Ok(join_room_by_id::v3::Response { room_id: room_id.into() });
+        return Ok(join_room_by_id::v3::Response::new(room_id.to_owned()));
     }
     if let Err(e) = services
@@ -385,13 +377,14 @@ async fn join_room_by_id_helper_remote(
     info!("make_join finished");
-    let room_version_id = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
+    let room_version = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
+    let room_version_rules = room_version
+        .rules()
+        .expect("room version should have defined rules");
-    if !services.server.supported_room_version(&room_version_id) {
+    if !services.server.supported_room_version(&room_version) {
         // How did we get here?
-        return Err!(BadServerResponse(
-            "Remote room version {room_version_id} is not supported by conduwuit"
-        ));
+        return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
     }
     let mut join_event_stub: CanonicalJsonObject =
@@ -403,7 +396,7 @@ async fn join_room_by_id_helper_remote(
     let join_authorized_via_users_server = {
         use RoomVersionId::*;
-        if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
+        if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
             join_event_stub
                 .get("content")
                 .map(|s| {
@@ -425,36 +418,32 @@ async fn join_room_by_id_helper_remote(
             .expect("Timestamp is valid js_int value"),
         ),
     );
+    let mut join_content = RoomMemberEventContent::new(MembershipState::Join);
+    join_content.displayname = services.users.displayname(sender_user).await.ok();
+    join_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
+    join_content.blurhash = services.users.blurhash(sender_user).await.ok();
+    join_content.reason = reason;
+    join_content
+        .join_authorized_via_users_server
+        .clone_from(&join_authorized_via_users_server);
     join_event_stub.insert(
         "content".to_owned(),
-        to_canonical_value(RoomMemberEventContent {
-            displayname: services.users.displayname(sender_user).await.ok(),
-            avatar_url: services.users.avatar_url(sender_user).await.ok(),
-            blurhash: services.users.blurhash(sender_user).await.ok(),
-            reason,
-            join_authorized_via_users_server: join_authorized_via_users_server.clone(),
-            ..RoomMemberEventContent::new(MembershipState::Join)
-        })
-        .expect("event is valid, we just created it"),
+        to_canonical_value(join_content).expect("event is valid, we just created it"),
     );
-    // We keep the "event_id" in the pdu only in v1 or
-    // v2 rooms
-    match room_version_id {
-        | RoomVersionId::V1 | RoomVersionId::V2 => {},
-        | _ => {
-            join_event_stub.remove("event_id");
-        },
-    }
+    // Remove event id if it exists
+    join_event_stub.remove("event_id");
     // In order to create a compatible ref hash (EventID) the `hashes` field needs
     // to be present
     services
         .server_keys
-        .hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
+        .hash_and_sign_event(&mut join_event_stub, &room_version_rules)?;
     // Generate event id
-    let event_id = gen_event_id(&join_event_stub, &room_version_id)?;
+    let event_id = gen_event_id(&join_event_stub, &room_version_rules)?;
     // Add event_id back
     join_event_stub
@@ -464,15 +453,14 @@ async fn join_room_by_id_helper_remote(
     let mut join_event = join_event_stub;
     info!("Asking {remote_server} for send_join in room {room_id}");
-    let send_join_request = federation::membership::create_join_event::v2::Request {
-        room_id: room_id.to_owned(),
-        event_id: event_id.clone(),
-        omit_members: false,
-        pdu: services
+    let send_join_request = federation::membership::create_join_event::v2::Request::new(
+        room_id.to_owned(),
+        event_id.clone(),
+        services
             .sending
             .convert_to_outgoing_federation_event(join_event.clone())
             .await,
-    };
+    );
     let send_join_response = match services
         .sending
@@ -496,7 +484,7 @@ async fn join_room_by_id_helper_remote(
     );
     let (signed_event_id, signed_value) =
-        gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| {
+        gen_event_id_canonical_json(signed_raw, &room_version_rules).map_err(|e| {
             err!(Request(BadJson(warn!(
                 "Could not convert event to canonical JSON: {e}"
             ))))
@@ -571,7 +559,7 @@ async fn join_room_by_id_helper_remote(
     .then(|pdu| {
         services
             .server_keys
-            .validate_and_add_event_id_no_fetch(pdu, &room_version_id)
+            .validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
             .inspect_err(|e| {
                 debug_warn!("Could not validate send_join response room_state event: {e:?}");
             })
@@ -619,7 +607,7 @@ async fn join_room_by_id_helper_remote(
     .then(|pdu| {
         services
             .server_keys
-            .validate_and_add_event_id_no_fetch(pdu, &room_version_id)
+            .validate_and_add_event_id_no_fetch(pdu, &room_version_rules)
     })
     .ready_filter_map(Result::ok)
     .ready_for_each(|(event_id, value)| {
@@ -640,7 +628,7 @@ async fn join_room_by_id_helper_remote(
     };
     let auth_check = state_res::event_auth::auth_check(
-        &state_res::RoomVersion::new(&room_version_id)?,
+        &room_version.rules().unwrap(),
         &parsed_join_pdu,
         None, // TODO: third party invite
         |k, s| state_fetch(k.clone(), s.into()),
@@ -747,8 +735,7 @@ async fn join_room_by_id_helper_local(
     // This is a restricted room, check if we can complete the join requirements
     // locally.
     let needs_auth_user =
-        user_can_perform_restricted_join(services, sender_user, room_id, &room_version)
-            .await;
+        user_can_perform_restricted_join(services, sender_user, room_id).await;
     if needs_auth_user.is_ok_and(is_true!()) {
         // If there was an error or the value is false, we'll try joining over
         // federation. Since it's Ok(true), we can authorise this locally.
@@ -761,21 +748,19 @@ async fn join_room_by_id_helper_local(
     }
     }
-    let content = RoomMemberEventContent {
-        displayname: services.users.displayname(sender_user).await.ok(),
-        avatar_url: services.users.avatar_url(sender_user).await.ok(),
-        blurhash: services.users.blurhash(sender_user).await.ok(),
-        reason: reason.clone(),
-        join_authorized_via_users_server: auth_user,
-        ..RoomMemberEventContent::new(MembershipState::Join)
-    };
+    let mut content = RoomMemberEventContent::new(MembershipState::Join);
+    content.displayname = services.users.displayname(sender_user).await.ok();
+    content.avatar_url = services.users.avatar_url(sender_user).await.ok();
+    content.blurhash = services.users.blurhash(sender_user).await.ok();
+    content.reason.clone_from(&reason);
+    content.join_authorized_via_users_server = auth_user;
     // Try normal join first
     let Err(error) = services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder::state(sender_user.to_string(), &content),
+            PartialPdu::state(sender_user.to_string(), &content),
             sender_user,
             Some(room_id),
             &state_lock,
@@ -824,16 +809,16 @@ async fn make_join_request(
         "Asking {remote_server} for make_join (attempt {make_join_counter}/{})",
         servers.len()
     );
+    let mut request = federation::membership::prepare_join_event::v1::Request::new(
+        room_id.to_owned(),
+        sender_user.to_owned(),
+    );
+    request.ver = services.server.supported_room_versions().collect();
     let make_join_response = services
         .sending
-        .send_federation_request(
-            remote_server,
-            federation::membership::prepare_join_event::v1::Request {
-                room_id: room_id.to_owned(),
-                user_id: sender_user.to_owned(),
-                ver: services.server.supported_room_versions().collect(),
-            },
-        )
+        .send_federation_request(remote_server, request)
         .await;
     trace!("make_join response: {:?}", make_join_response);
@@ -866,14 +851,17 @@ async fn make_join_request(
                 rules, but is unable to authorise a join for us. Will continue trying."
             );
         },
-        | ErrorKind::IncompatibleRoomVersion { room_version } => {
+        | ErrorKind::IncompatibleRoomVersion(IncompatibleRoomVersionErrorData {
+            room_version,
+            ..
+        }) => {
             warn!(
                 "{remote_server} reports the room we are trying to join is \
                 v{room_version}, which we do not support."
            );
            return Err(e);
        },
-        | ErrorKind::Forbidden { .. } => {
+        | ErrorKind::Forbidden => {
            warn!("{remote_server} refuses to let us join: {e}.");
            return Err(e);
        },
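The error-handling change at the end of this file suggests `ErrorKind::IncompatibleRoomVersion` went from a struct-like variant to a tuple variant carrying a dedicated payload type that must be destructured with `..`. A hedged mirror of that shape with local stand-ins (not the real ruma definitions):

// Sketch only: local stand-ins mirroring the apparent new ruma shape.
struct IncompatibleRoomVersionErrorData {
    room_version: String,
    // further fields may exist upstream; `..` keeps the match future-proof
}

enum ErrorKind {
    IncompatibleRoomVersion(IncompatibleRoomVersionErrorData),
    Forbidden,
}

fn describe(kind: &ErrorKind) -> String {
    match kind {
        ErrorKind::IncompatibleRoomVersion(IncompatibleRoomVersionErrorData {
            room_version, ..
        }) => format!("unsupported room version v{room_version}"),
        ErrorKind::Forbidden => "remote refused the join".to_owned(),
    }
}

fn main() {
    let kind = ErrorKind::IncompatibleRoomVersion(IncompatibleRoomVersionErrorData {
        room_version: "12".to_owned(),
    });
    println!("{}", describe(&kind));
}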
+18 -25
@@ -1,7 +1,8 @@
 use axum::extract::State;
-use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
+use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
 use ruma::{
     api::client::membership::kick_user,
+    assign,
     events::room::member::{MembershipState, RoomMemberEventContent},
 };
@@ -18,41 +19,33 @@ pub(crate) async fn kick_user_route(
     if services.users.is_suspended(sender_user).await? {
         return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
     }
-    let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
+    let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
-    let Ok(event) = services
+    if !services
         .rooms
-        .state_accessor
-        .get_member(&body.room_id, &body.user_id)
+        .state_cache
+        .user_membership(&body.user_id, &body.room_id)
         .await
-    else {
-        // copy synapse's behaviour of returning 200 without any change to the state
-        // instead of erroring on left users
-        return Ok(kick_user::v3::Response::new());
-    };
-    if !matches!(
-        event.membership,
-        MembershipState::Invite | MembershipState::Knock | MembershipState::Join,
-    ) {
-        return Err!(Request(Forbidden(
-            "Cannot kick a user who is not apart of the room (current membership: {})",
-            event.membership
-        )));
+        .is_some_and(|membership| {
+            matches!(
+                membership,
+                MembershipState::Invite | MembershipState::Join | MembershipState::Knock
+            )
+        }) {
+        return Err!(Request(Forbidden("You cannot kick users who are not in the room.")));
     }
     services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
-                membership: MembershipState::Leave,
+            PartialPdu::state(
+                body.user_id.to_string(),
+                &assign!(RoomMemberEventContent::new(MembershipState::Leave), {
                     reason: body.reason.clone(),
-                is_direct: None,
-                join_authorized_via_users_server: None,
-                third_party_invite: None,
-                ..event
+                    redact_events: body.redact_events,
                 }),
+            ),
             sender_user,
             Some(&body.room_id),
             &state_lock,
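The kick precondition changed behaviour: where the old code mimicked Synapse and returned 200 for users with no membership event, the new code rejects both "no membership" and "wrong membership" through one `is_some_and` gate. A self-contained sketch of that predicate (types are stand-ins, not the real conduwuit/ruma ones):

// Sketch only: `MembershipState` stands in for the ruma enum.
#[derive(Debug, PartialEq)]
enum MembershipState { Invite, Join, Knock, Leave, Ban }

fn can_kick(membership: Option<&MembershipState>) -> bool {
    // `is_some_and` folds "no membership event at all" into the same
    // rejection path as "membership is not invite/join/knock".
    membership.is_some_and(|m| {
        matches!(
            m,
            MembershipState::Invite | MembershipState::Join | MembershipState::Knock
        )
    })
}

fn main() {
    assert!(can_kick(Some(&MembershipState::Join)));
    assert!(!can_kick(Some(&MembershipState::Leave)));
    assert!(!can_kick(Some(&MembershipState::Ban)));
    assert!(!can_kick(None)); // previously returned 200 with no state change
}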
+75 -85
@@ -1,12 +1,12 @@
 use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
     Err, Result, debug, debug_info, debug_warn, err, info,
     matrix::{
         event::gen_event_id,
-        pdu::{PduBuilder, PduEvent},
+        pdu::{PartialPdu, PduEvent},
     },
     result::FlatOk,
     trace,
@@ -15,8 +15,8 @@
 };
 use futures::{FutureExt, StreamExt};
 use ruma::{
-    CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId,
-    RoomVersionId, UserId,
+    CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,
+    OwnedUserId, RoomId, UserId,
     api::{
         client::knock::knock_room,
         federation::{self},
@@ -47,7 +47,7 @@
 #[tracing::instrument(skip_all, fields(%client), name = "knock", level = "info")]
 pub(crate) async fn knock_room_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client): InsecureClientIp,
+    ClientIp(client): ClientIp,
     body: Ruma<knock_room::v3::Request>,
 ) -> Result<knock_room::v3::Response> {
     let sender_user = body.sender_user();
@@ -73,7 +73,6 @@ pub(crate) async fn knock_room_route(
     .rooms
     .state_cache
     .servers_invite_via(&room_id)
-    .map(ToOwned::to_owned)
     .collect::<Vec<_>>()
     .await,
 );
@@ -113,11 +112,7 @@ pub(crate) async fn knock_room_route(
     )
     .await?;
-    let addl_via_servers = services
-        .rooms
-        .state_cache
-        .servers_invite_via(&room_id)
-        .map(ToOwned::to_owned);
+    let addl_via_servers = services.rooms.state_cache.servers_invite_via(&room_id);
     let addl_state_servers = services
         .rooms
@@ -130,7 +125,7 @@ pub(crate) async fn knock_room_route(
     .iter()
     .map(|event| event.get_field("sender"))
     .filter_map(FlatOk::flat_ok)
-    .map(|user: &UserId| user.server_name().to_owned())
+    .map(|user: OwnedUserId| user.server_name().to_owned())
     .stream()
     .chain(addl_via_servers)
     .collect()
@@ -188,7 +183,7 @@ async fn knock_room_by_id_helper(
     .await
     {
         debug_warn!("{sender_user} is already knocked in {room_id}");
-        return Ok(knock_room::v3::Response { room_id: room_id.into() });
+        return Ok(knock_room::v3::Response::new(room_id.into()));
     }
     if let Ok(membership) = services
@@ -339,34 +334,27 @@ async fn knock_room_helper_local(
 ) -> Result {
     debug_info!("We can knock locally");
-    let room_version_id = services.rooms.state.get_room_version(room_id).await?;
+    let room_version = services.rooms.state.get_room_version(room_id).await?;
+    let room_version_rules = room_version
+        .rules()
+        .expect("room version should have defined rules");
-    if matches!(
-        room_version_id,
-        RoomVersionId::V1
-            | RoomVersionId::V2
-            | RoomVersionId::V3
-            | RoomVersionId::V4
-            | RoomVersionId::V5
-            | RoomVersionId::V6
-    ) {
+    if !room_version_rules.authorization.knocking {
         return Err!(Request(Forbidden("This room does not support knocking.")));
     }
-    let content = RoomMemberEventContent {
-        displayname: services.users.displayname(sender_user).await.ok(),
-        avatar_url: services.users.avatar_url(sender_user).await.ok(),
-        blurhash: services.users.blurhash(sender_user).await.ok(),
-        reason: reason.clone(),
-        ..RoomMemberEventContent::new(MembershipState::Knock)
-    };
+    let mut content = RoomMemberEventContent::new(MembershipState::Knock);
+    content.displayname = services.users.displayname(sender_user).await.ok();
+    content.avatar_url = services.users.avatar_url(sender_user).await.ok();
+    content.blurhash = services.users.blurhash(sender_user).await.ok();
+    content.reason.clone_from(&reason.clone());
    // Try normal knock first
    let Err(error) = services
        .rooms
        .timeline
        .build_and_append_pdu(
-            PduBuilder::state(sender_user.to_string(), &content),
+            PartialPdu::state(sender_user.to_string(), &content),
            sender_user,
            Some(room_id),
            &state_lock,
@@ -381,19 +369,18 @@ async fn knock_room_helper_local(
        return Err(error);
    }
-    warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock");
    let (make_knock_response, remote_server) =
        make_knock_request(services, sender_user, room_id, servers).await?;
    info!("make_knock finished");
-    let room_version_id = make_knock_response.room_version;
+    let room_version = make_knock_response.room_version;
+    let room_version_rules = room_version
+        .rules()
+        .expect("room version should have defined rules");
-    if !services.server.supported_room_version(&room_version_id) {
-        return Err!(BadServerResponse(
-            "Remote room version {room_version_id} is not supported by conduwuit"
-        ));
+    if !services.server.supported_room_version(&room_version) {
+        return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
    }
    let mut knock_event_stub = serde_json::from_str::<CanonicalJsonObject>(
@@ -424,24 +411,17 @@ async fn knock_room_helper_local(
    );
    knock_event_stub.insert(
        "content".to_owned(),
-        to_canonical_value(RoomMemberEventContent {
-            displayname: services.users.displayname(sender_user).await.ok(),
-            avatar_url: services.users.avatar_url(sender_user).await.ok(),
-            blurhash: services.users.blurhash(sender_user).await.ok(),
-            reason,
-            ..RoomMemberEventContent::new(MembershipState::Knock)
-        })
-        .expect("event is valid, we just created it"),
+        to_canonical_value(content).expect("event is valid, we just created it"),
    );
    // In order to create a compatible ref hash (EventID) the `hashes` field needs
    // to be present
    services
        .server_keys
-        .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
+        .hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
    // Generate event id
-    let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
+    let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
    // Add event_id
    knock_event_stub
@@ -451,14 +431,14 @@ async fn knock_room_helper_local(
    let knock_event = knock_event_stub;
    info!("Asking {remote_server} for send_knock in room {room_id}");
-    let send_knock_request = federation::knock::send_knock::v1::Request {
-        room_id: room_id.to_owned(),
-        event_id: event_id.clone(),
-        pdu: services
+    let send_knock_request = federation::membership::create_knock_event::v1::Request::new(
+        room_id.to_owned(),
+        event_id.clone(),
+        services
            .sending
            .convert_to_outgoing_federation_event(knock_event.clone())
            .await,
-    };
+    );
    services
        .sending
@@ -520,12 +500,13 @@ async fn knock_room_helper_remote(
    info!("make_knock finished");
-    let room_version_id = make_knock_response.room_version;
+    let room_version = make_knock_response.room_version;
+    let room_version_rules = room_version
+        .rules()
+        .expect("room version should have defined rules");
-    if !services.server.supported_room_version(&room_version_id) {
-        return Err!(BadServerResponse(
-            "Remote room version {room_version_id} is not supported by conduwuit"
-        ));
+    if !services.server.supported_room_version(&room_version) {
+        return Err!(BadServerResponse("Remote room version {room_version} is not supported"));
    }
    let mut knock_event_stub: CanonicalJsonObject =
@@ -545,26 +526,26 @@ async fn knock_room_helper_remote(
            .expect("Timestamp is valid js_int value"),
        ),
    );
+    let mut knock_content = RoomMemberEventContent::new(MembershipState::Knock);
+    knock_content.displayname = services.users.displayname(sender_user).await.ok();
+    knock_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
+    knock_content.blurhash = services.users.blurhash(sender_user).await.ok();
+    knock_content.reason = reason;
    knock_event_stub.insert(
        "content".to_owned(),
-        to_canonical_value(RoomMemberEventContent {
-            displayname: services.users.displayname(sender_user).await.ok(),
-            avatar_url: services.users.avatar_url(sender_user).await.ok(),
-            blurhash: services.users.blurhash(sender_user).await.ok(),
-            reason,
-            ..RoomMemberEventContent::new(MembershipState::Knock)
-        })
-        .expect("event is valid, we just created it"),
+        to_canonical_value(knock_content).expect("event is valid, we just created it"),
    );
    // In order to create a compatible ref hash (EventID) the `hashes` field needs
    // to be present
    services
        .server_keys
-        .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
+        .hash_and_sign_event(&mut knock_event_stub, &room_version_rules)?;
    // Generate event id
-    let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
+    let event_id = gen_event_id(&knock_event_stub, &room_version_rules)?;
    // Add event_id
    knock_event_stub
@@ -574,18 +555,18 @@ async fn knock_room_helper_remote(
    let knock_event = knock_event_stub;
    info!("Asking {remote_server} for send_knock in room {room_id}");
-    let send_knock_request = federation::knock::send_knock::v1::Request {
-        room_id: room_id.to_owned(),
-        event_id: event_id.clone(),
-        pdu: services
+    let request = federation::membership::create_knock_event::v1::Request::new(
+        room_id.to_owned(),
+        event_id.clone(),
+        services
            .sending
            .convert_to_outgoing_federation_event(knock_event.clone())
            .await,
-    };
+    );
    let send_knock_response = services
        .sending
-        .send_federation_request(&remote_server, send_knock_request)
+        .send_federation_request(&remote_server, request)
        .await?;
    info!("send_knock finished");
@@ -604,7 +585,17 @@ async fn knock_room_helper_remote(
    let state = send_knock_response
        .knock_room_state
        .iter()
-        .map(|event| serde_json::from_str::<CanonicalJsonObject>(event.clone().into_json().get()))
+        .map(|event| {
+            #[allow(deprecated)]
+            let raw_value = match event {
+                | federation::membership::RawStrippedState::Stripped(raw_state) =>
+                    &raw_state.clone().into_json(),
+                | federation::membership::RawStrippedState::Pdu(raw_value) => raw_value,
+                | _ => panic!("unknown raw stripped state type"),
+            };
+            serde_json::from_str::<CanonicalJsonObject>(raw_value.get())
+        })
        .filter_map(Result::ok);
    let mut state_map: HashMap<u64, OwnedEventId> = HashMap::new();
@@ -629,7 +620,7 @@ async fn knock_room_helper_remote(
        continue;
    };
-    let event_id = gen_event_id(&event, &room_version_id)?;
+    let event_id = gen_event_id(&event, &room_version_rules)?;
    let shortstatekey = services
        .rooms
        .short
@@ -709,7 +700,7 @@ async fn make_knock_request(
    sender_user: &UserId,
    room_id: &RoomId,
    servers: &[OwnedServerName],
-) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> {
+) -> Result<(federation::membership::prepare_knock_event::v1::Response, OwnedServerName)> {
    let mut make_knock_response_and_server =
        Err!(BadServerResponse("No server available to assist in knocking."));
@@ -722,16 +713,15 @@ async fn make_knock_request(
    info!("Asking {remote_server} for make_knock ({make_knock_counter})");
+    let mut request = federation::membership::prepare_knock_event::v1::Request::new(
+        room_id.to_owned(),
+        sender_user.to_owned(),
+    );
+    request.ver = services.server.supported_room_versions().collect();
    let make_knock_response = services
        .sending
-        .send_federation_request(
-            remote_server,
-            federation::knock::create_knock_event_template::v1::Request {
-                room_id: room_id.to_owned(),
-                user_id: sender_user.to_owned(),
-                ver: services.server.supported_room_versions().collect(),
-            },
-        )
+        .send_federation_request(remote_server, request)
        .await;
    trace!("make_knock response: {make_knock_response:?}");
+26 -33
@@ -3,13 +3,13 @@
 use axum::extract::State;
 use conduwuit::{
     Err, Pdu, Result, debug_info, debug_warn, err,
-    matrix::{event::gen_event_id, pdu::PduBuilder},
+    matrix::{event::gen_event_id, pdu::PartialPdu},
     utils::{self, FutureBoolExt, future::ReadyEqExt},
     warn,
 };
 use futures::{FutureExt, StreamExt, pin_mut};
 use ruma::{
-    CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId,
+    CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, UserId,
     api::{
         client::membership::leave_room,
         federation::{self},
@@ -42,11 +42,7 @@ pub(crate) async fn leave_room_route(
 // Make a user leave all their joined rooms, rescinds knocks, forgets all rooms,
 // and ignores errors
 pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
-    let rooms_joined = services
-        .rooms
-        .state_cache
-        .rooms_joined(user_id)
-        .map(ToOwned::to_owned);
+    let rooms_joined = services.rooms.state_cache.rooms_joined(user_id);
     let rooms_invited = services
         .rooms
@@ -142,18 +138,17 @@ pub async fn leave_room(
     .await;
     match user_member_event_content {
-        | Ok(content) => {
+        | Ok(mut content) => {
+            content.membership = MembershipState::Leave;
+            content.reason = reason;
+            content.join_authorized_via_users_server = None;
+            content.is_direct = None;
             services
                 .rooms
                 .timeline
                 .build_and_append_pdu(
-                    PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
-                        membership: MembershipState::Leave,
-                        reason,
-                        join_authorized_via_users_server: None,
-                        is_direct: None,
-                        ..content
-                    }),
+                    PartialPdu::state(user_id.to_string(), &content),
                     user_id,
                     Some(room_id),
                     &state_lock,
@@ -226,7 +221,6 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
     .rooms
     .state_cache
     .servers_invite_via(room_id)
-    .map(ToOwned::to_owned)
     .collect::<HashSet<OwnedServerName>>()
     .await,
 );
@@ -260,7 +254,7 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
     .filter_map(|event| event.get_field("sender").ok().flatten())
     .filter_map(|sender: &str| UserId::parse(sender).ok())
     .filter_map(|sender| {
-        if !services.globals.user_is_local(sender) {
+        if !services.globals.user_is_local(&sender) {
            Some(sender.server_name().to_owned())
        } else {
            None
@@ -289,10 +283,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
    .sending
    .send_federation_request(
        remote_server.as_ref(),
-        federation::membership::prepare_leave_event::v1::Request {
-            room_id: room_id.to_owned(),
-            user_id: user_id.to_owned(),
-        },
+        federation::membership::prepare_leave_event::v1::Request::new(
+            room_id.to_owned(),
+            user_id.to_owned(),
+        ),
    )
    .await;
@@ -329,6 +323,10 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
    )));
 }
+    let room_version_rules = room_version_id
+        .rules()
+        .expect("room version should have defined rules");
    let mut leave_event_stub = serde_json::from_str::<CanonicalJsonObject>(
        make_leave_response.event.get(),
    )
@@ -366,21 +364,16 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
    }
    // room v3 and above removed the "event_id" field from remote PDU format
-    match room_version_id {
-        | RoomVersionId::V1 | RoomVersionId::V2 => {},
-        | _ => {
-            leave_event_stub.remove("event_id");
-        },
-    }
+    leave_event_stub.remove("event_id");
    // In order to create a compatible ref hash (EventID) the `hashes` field needs
    // to be present
    services
        .server_keys
-        .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?;
+        .hash_and_sign_event(&mut leave_event_stub, &room_version_rules)?;
    // Generate event id
-    let event_id = gen_event_id(&leave_event_stub, &room_version_id)?;
+    let event_id = gen_event_id(&leave_event_stub, &room_version_rules)?;
    // Add event_id back
    leave_event_stub
@@ -393,14 +386,14 @@ pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
    .sending
    .send_federation_request(
        &remote_server,
-        federation::membership::create_leave_event::v2::Request {
-            room_id: room_id.to_owned(),
-            event_id: event_id.clone(),
-            pdu: services
+        federation::membership::create_leave_event::v2::Request::new(
+            room_id.to_owned(),
+            event_id.clone(),
+            services
                .sending
                .convert_to_outgoing_federation_event(leave_event.clone())
                .await,
-        },
+        ),
    )
    .await?;
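One detail worth noting above: the v1/v2 special case around `event_id` was dropped and the field is now stripped unconditionally before hashing and signing. A hedged sketch of the step using serde_json as an approximation of the canonical-JSON object the real code manipulates:

// Sketch only: real code operates on a CanonicalJsonObject, approximated here
// with a serde_json map.
use serde_json::{Map, Value, json};

fn prepare_stub(mut stub: Map<String, Value>) -> Map<String, Value> {
    // Reference-hash event IDs are derived from the event's content, so any
    // server-provided `event_id` must be removed before the `hashes` field is
    // computed and the event is signed; the old v1/v2 carve-out is gone.
    stub.remove("event_id");
    stub
}

fn main() {
    let stub = json!({ "type": "m.room.member", "event_id": "$stale" });
    let cleaned = prepare_stub(stub.as_object().cloned().unwrap());
    assert!(!cleaned.contains_key("event_id"));
}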
+24 -48
@@ -9,7 +9,7 @@
 use futures::{FutureExt, StreamExt, future::join};
 use ruma::{
     api::client::membership::{
-        get_member_events::{self, v3::MembershipEventFilter},
+        get_member_events::{self},
         joined_members::{self, v3::RoomMember},
     },
     events::{
@@ -43,8 +43,7 @@ pub(crate) async fn get_member_events_route(
         return Err!(Request(Forbidden("You don't have permission to view this room.")));
     }

-    Ok(get_member_events::v3::Response {
-        chunk: services
+    let chunk = services
         .rooms
         .state_accessor
         .room_state_full(&body.room_id)
@@ -55,8 +54,9 @@ pub(crate) async fn get_member_events_route(
         .map(Event::into_format)
         .collect()
         .boxed()
-        .await,
-    })
+        .await;
+
+    Ok(get_member_events::v3::Response::new(chunk))
 }

 /// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members`
@@ -78,70 +78,46 @@ pub(crate) async fn joined_members_route(
         return Err!(Request(Forbidden("You don't have permission to view this room.")));
     }

-    Ok(joined_members::v3::Response {
-        joined: services
+    let joined = services
         .rooms
         .state_cache
         .room_members(&body.room_id)
+        .map(ToOwned::to_owned)
         .broad_then(|user_id| async move {
+            let mut member = RoomMember::new();
             let (display_name, avatar_url) = join(
                 services.users.displayname(&user_id).ok(),
                 services.users.avatar_url(&user_id).ok(),
             )
             .await;
-            (user_id, RoomMember { display_name, avatar_url })
+            member.display_name = display_name;
+            member.avatar_url = avatar_url;
+
+            (user_id, member)
         })
         .collect()
-        .await,
-    })
+        .await;
+
+    Ok(joined_members::v3::Response::new(joined))
 }

 fn membership_filter<Pdu: Event>(
     pdu: Pdu,
-    for_membership: Option<&MembershipEventFilter>,
-    not_membership: Option<&MembershipEventFilter>,
+    membership_state_filter: Option<&MembershipState>,
+    not_membership_state_filter: Option<&MembershipState>,
 ) -> Option<impl Event> {
-    let membership_state_filter = match for_membership {
-        | Some(MembershipEventFilter::Ban) => MembershipState::Ban,
-        | Some(MembershipEventFilter::Invite) => MembershipState::Invite,
-        | Some(MembershipEventFilter::Knock) => MembershipState::Knock,
-        | Some(MembershipEventFilter::Leave) => MembershipState::Leave,
-        | Some(_) | None => MembershipState::Join,
-    };
-
-    let not_membership_state_filter = match not_membership {
-        | Some(MembershipEventFilter::Ban) => MembershipState::Ban,
-        | Some(MembershipEventFilter::Invite) => MembershipState::Invite,
-        | Some(MembershipEventFilter::Join) => MembershipState::Join,
-        | Some(MembershipEventFilter::Knock) => MembershipState::Knock,
-        | Some(_) | None => MembershipState::Leave,
-    };
-
     let evt_membership = pdu.get_content::<RoomMemberEventContent>().ok()?.membership;

-    if for_membership.is_some() && not_membership.is_some() {
-        if membership_state_filter != evt_membership
-            || not_membership_state_filter == evt_membership
-        {
-            None
-        } else {
-            Some(pdu)
-        }
-    } else if for_membership.is_some() && not_membership.is_none() {
-        if membership_state_filter != evt_membership {
-            None
-        } else {
-            Some(pdu)
-        }
-    } else if not_membership.is_some() && for_membership.is_none() {
-        if not_membership_state_filter == evt_membership {
-            None
-        } else {
-            Some(pdu)
-        }
-    } else {
-        Some(pdu)
-    }
+    if let Some(membership_state_filter) = membership_state_filter
+        && *membership_state_filter != evt_membership
+    {
+        return None;
+    }
+
+    if let Some(not_membership_state_filter) = not_membership_state_filter
+        && *not_membership_state_filter == evt_membership
+    {
+        return None;
+    }
+
+    Some(pdu)
 }
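Note: the rewritten membership_filter collapses four branch combinations into two independent guards. A runnable sketch of the same logic using a stand-in enum rather than ruma's MembershipState:

#[derive(Clone, Debug, PartialEq)]
enum Membership {
    Join,
    Leave,
    Ban,
}

fn filter(evt: Membership, wanted: Option<&Membership>, unwanted: Option<&Membership>) -> Option<Membership> {
    // Guard 1: if a wanted membership is given, the event must match it.
    if let Some(wanted) = wanted {
        if *wanted != evt {
            return None;
        }
    }

    // Guard 2: if an unwanted membership is given, the event must not match it.
    if let Some(unwanted) = unwanted {
        if *unwanted == evt {
            return None;
        }
    }

    Some(evt)
}

fn main() {
    assert_eq!(filter(Membership::Join, Some(&Membership::Join), None), Some(Membership::Join));
    assert_eq!(filter(Membership::Ban, None, Some(&Membership::Ban)), None);
    assert_eq!(filter(Membership::Leave, None, None), Some(Membership::Leave));
}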
+4 -5
@@ -47,15 +47,14 @@ pub(crate) async fn joined_rooms_route(
     State(services): State<crate::State>,
     body: Ruma<joined_rooms::v3::Request>,
 ) -> Result<joined_rooms::v3::Response> {
-    Ok(joined_rooms::v3::Response {
-        joined_rooms: services
+    let joined_rooms = services
         .rooms
         .state_cache
         .rooms_joined(body.sender_user())
+        .map(ToOwned::to_owned)
         .collect()
-        .await,
-    })
+        .await;
+
+    Ok(joined_rooms::v3::Response::new(joined_rooms))
 }

 /// Checks if the room is banned in any way possible and the sender user is not
+10 -11
@@ -1,5 +1,5 @@
 use axum::extract::State;
-use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
+use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
 use ruma::{
     api::client::membership::unban_user,
     events::room::member::{MembershipState, RoomMemberEventContent},
@@ -18,9 +18,9 @@ pub(crate) async fn unban_user_route(
     if services.users.is_suspended(sender_user).await? {
         return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
     }

-    let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
+    let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;

-    let current_member_content = services
+    let mut current_member_content = services
         .rooms
         .state_accessor
         .get_member(&body.room_id, &body.user_id)
@@ -34,18 +34,17 @@ pub(crate) async fn unban_user_route(
         )));
     }

+    current_member_content.membership = MembershipState::Leave;
+    current_member_content.reason.clone_from(&body.reason);
+    current_member_content.join_authorized_via_users_server = None;
+    current_member_content.third_party_invite = None;
+    current_member_content.is_direct = None;
+
     services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
-                membership: MembershipState::Leave,
-                reason: body.reason.clone(),
-                join_authorized_via_users_server: None,
-                third_party_invite: None,
-                is_direct: None,
-                ..current_member_content
-            }),
+            PartialPdu::state(body.user_id.to_string(), &current_member_content),
             sender_user,
             Some(&body.room_id),
             &state_lock,
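Note: the unban hunk switches from building a fresh RoomMemberEventContent (with `..current_member_content`) to mutating the fetched content in place. A standalone sketch of that shape with stand-in types, not the repo's:

#[derive(Clone, Debug, PartialEq)]
enum MembershipState {
    Ban,
    Leave,
}

#[derive(Clone, Debug)]
struct MemberContent {
    membership: MembershipState,
    reason: Option<String>,
    is_direct: Option<bool>,
}

// Mutate the current membership content into a "leave" event, clearing
// fields that must not carry over from the ban.
fn unban(mut current: MemberContent, reason: Option<String>) -> MemberContent {
    current.membership = MembershipState::Leave;
    current.reason = reason;
    current.is_direct = None;
    current
}

fn main() {
    let banned = MemberContent {
        membership: MembershipState::Ban,
        reason: Some("spam".to_owned()),
        is_direct: None,
    };
    let left = unban(banned, None);
    assert_eq!(left.membership, MembershipState::Leave);
}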
+15 -12
@@ -1,5 +1,5 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{
     Err, Error, Result, at, debug_warn,
     matrix::{
@@ -26,15 +26,17 @@
     DeviceId, RoomId, UserId,
     api::{
         Direction,
-        client::{error::ErrorKind, filter::RoomEventFilter, message::get_message_events},
+        client::{filter::RoomEventFilter, message::get_message_events},
+        error::{ErrorKind, SenderIgnoredErrorData},
     },
+    assign,
     events::{
         AnyStateEvent, StateEventType,
         TimelineEventType::{self, *},
+        invite_permission_config::FilterLevel,
     },
     serde::Raw,
 };
-use ruminuwuity::invite_permission_config::FilterLevel;
 use tracing::warn;

 use crate::Ruma;
@@ -71,10 +73,9 @@
 /// where the user was joined, depending on `history_visibility`)
 pub(crate) async fn get_message_events_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client_ip): InsecureClientIp,
+    ClientIp(client_ip): ClientIp,
     body: Ruma<get_message_events::v3::Request>,
 ) -> Result<get_message_events::v3::Response> {
-    debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
     let sender_user = body.sender_user();
     let sender_device = body.sender_device.as_deref();
     let room_id = &body.room_id;
@@ -199,12 +200,12 @@ pub(crate) async fn get_message_events_route(
         .map(Event::into_format)
         .collect();

-    Ok(get_message_events::v3::Response {
+    Ok(assign!(get_message_events::v3::Response::new(), {
         start: from.to_string(),
         end: next_token.as_ref().map(PduCount::to_string),
-        chunk,
-        state,
-    })
+        chunk: chunk,
+        state: state,
+    }))
 }

 pub(crate) async fn lazy_loading_witness<'a, I>(
@@ -301,7 +302,7 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
 {
     // exclude Synapse's dummy events from bloating up response bodies. clients
     // don't need to see this.
-    if event.kind().to_cow_str() == "org.matrix.dummy_event" {
+    if event.kind().to_string() == "org.matrix.dummy_event" {
         return Ok(true);
     }
@@ -323,7 +324,7 @@
     if server_ignored {
         // the sender's server is ignored, so ignore this event
         return Err(Error::BadRequest(
-            ErrorKind::SenderIgnored { sender: None },
+            ErrorKind::SenderIgnored(SenderIgnoredErrorData::new()),
             "The sender's server is ignored by this server.",
         ));
     }
@@ -332,7 +333,9 @@
         // the recipient of this PDU has the sender ignored, and we're not
         // configured to send ignored messages to clients
         return Err(Error::BadRequest(
-            ErrorKind::SenderIgnored { sender: Some(event.sender().to_owned()) },
+            ErrorKind::SenderIgnored(SenderIgnoredErrorData::with_sender(
+                event.sender().to_owned(),
+            )),
             "You have ignored this sender.",
         ));
     }
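Note: several hunks in this range replace struct-literal Response construction with ruma's `assign!` macro (constructor first, then field assignments). A rough standalone equivalent; the local `assign!` below is a simplified stand-in for illustration, not ruma's exact definition:

// Simplified stand-in for ruma::assign!: take a base value, set fields, return it.
macro_rules! assign {
    ($base:expr, { $($field:ident: $value:expr),* $(,)? }) => {{
        let mut base = $base;
        $(base.$field = $value;)*
        base
    }};
}

#[derive(Default, Debug)]
struct Response {
    start: String,
    end: Option<String>,
}

fn main() {
    let res = assign!(Response::default(), {
        start: "t0".to_owned(),
        end: Some("t1".to_owned()),
    });
    println!("{res:?}");
}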
+2 -3
@@ -15,6 +15,7 @@
 pub(super) mod media_legacy;
 pub(super) mod membership;
 pub(super) mod message;
+pub(super) mod mutual_rooms;
 pub(super) mod openid;
 pub(super) mod presence;
 pub(super) mod profile;
@@ -35,7 +36,6 @@
 pub(super) mod threads;
 pub(super) mod to_device;
 pub(super) mod typing;
-pub(super) mod unstable;
 pub(super) mod unversioned;
 pub(super) mod user_directory;
 pub(super) mod voip;
@@ -60,10 +60,10 @@
 pub(super) use membership::*;
 pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
 pub(super) use message::*;
+pub(super) use mutual_rooms::*;
 pub(super) use openid::*;
 pub(super) use presence::*;
 pub(super) use profile::*;
-pub use profile::{update_all_rooms, update_avatar_url, update_displayname};
 pub use push::recreate_push_rules_and_return;
 pub(super) use push::*;
 pub(super) use read_marker::*;
@@ -82,7 +82,6 @@
 pub(super) use threads::*;
 pub(super) use to_device::*;
 pub(super) use typing::*;
-pub(super) use unstable::*;
 pub(super) use unversioned::*;
 pub(super) use user_directory::*;
 pub(super) use voip::*;
+36
@@ -0,0 +1,36 @@
+use axum::extract::State;
+use conduwuit::{Err, Result};
+use futures::StreamExt;
+use ruma::api::client::membership::mutual_rooms;
+
+use crate::Ruma;
+
+/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`
+///
+/// Gets all the rooms the sender shares with the specified user.
+///
+/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
+#[tracing::instrument(skip_all, name = "mutual_rooms", level = "info")]
+pub(crate) async fn get_mutual_rooms_route(
+    State(services): State<crate::State>,
+    body: Ruma<mutual_rooms::unstable::Request>,
+) -> Result<mutual_rooms::unstable::Response> {
+    let sender_user = body.sender_user();
+
+    if sender_user == body.user_id {
+        return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
+    }
+
+    if !services.users.exists(&body.user_id).await {
+        return Ok(mutual_rooms::unstable::Response::new(vec![]));
+    }
+
+    let mutual_rooms = services
+        .rooms
+        .state_cache
+        .get_shared_rooms(sender_user, &body.user_id)
+        .collect()
+        .await;
+
+    Ok(mutual_rooms::unstable::Response::new(mutual_rooms))
+}
+5 -5
@@ -29,10 +29,10 @@ pub(crate) async fn create_openid_token_route(
         .users
         .create_openid_token(&body.user_id, &access_token)?;

-    Ok(account::request_openid_token::v3::Response {
+    Ok(account::request_openid_token::v3::Response::new(
         access_token,
-        token_type: TokenType::Bearer,
-        matrix_server_name: services.server.name.clone(),
-        expires_in: Duration::from_secs(expires_in),
-    })
+        TokenType::Bearer,
+        services.server.name.clone(),
+        Duration::from_secs(expires_in),
+    ))
 }
+7 -6
@@ -2,7 +2,10 @@
 use axum::extract::State;
 use conduwuit::{Err, Result};
-use ruma::api::client::presence::{get_presence, set_presence};
+use ruma::{
+    api::client::presence::{get_presence, set_presence},
+    assign,
+};

 use crate::Ruma;
@@ -26,7 +29,7 @@ pub(crate) async fn set_presence_route(
         .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone())
         .await?;

-    Ok(set_presence::v3::Response {})
+    Ok(set_presence::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/presence/{userId}/status`
@@ -76,13 +79,11 @@ pub(crate) async fn get_presence_route(
                 .map(|millis| Duration::from_millis(millis.into())),
             };

-            Ok(get_presence::v3::Response {
-                // TODO: Should ruma just use the presenceeventcontent type here?
+            Ok(assign!(get_presence::v3::Response::new(presence.content.presence), {
                 status_msg,
                 currently_active: presence.content.currently_active,
                 last_active_ago,
-                presence: presence.content.presence,
-            })
+            }))
         },
         | _ => Err!(Request(NotFound("Presence state for this user was not found"))),
     }
+330 -374
@@ -1,229 +1,26 @@
 use std::collections::BTreeMap;

 use axum::extract::State;
-use conduwuit::{
-    Err, Result,
-    matrix::pdu::PduBuilder,
-    utils::{IterStream, future::TryExtExt, stream::TryIgnore},
-    warn,
-};
+use conduwuit::{Err, Result, matrix::pdu::PartialPdu, utils::to_canonical_object};
 use conduwuit_service::Services;
-use futures::{
-    FutureExt, StreamExt, TryStreamExt,
-    future::{join, join3, join4},
-};
+use futures::StreamExt;
 use ruma::{
-    OwnedMxcUri, OwnedRoomId, UserId,
+    UserId,
     api::{
         client::profile::{
-            get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
+            delete_profile_field, get_profile, get_profile_field, set_profile_field,
         },
         federation,
     },
+    assign,
     events::room::member::{MembershipState, RoomMemberEventContent},
     presence::PresenceState,
+    profile::{ProfileFieldName, ProfileFieldValue},
 };
+use serde_json::{Value, to_value};

 use crate::Ruma;

-/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
-///
-/// Updates the displayname.
-///
-/// - Also makes sure other users receive the update using presence EDUs
-pub(crate) async fn set_displayname_route(
-    State(services): State<crate::State>,
-    body: Ruma<set_display_name::v3::Request>,
-) -> Result<set_display_name::v3::Response> {
-    let sender_user = body.sender_user();
-    if services.users.is_suspended(sender_user).await? {
-        return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
-    }
-
-    if *sender_user != body.user_id && body.appservice_info.is_none() {
-        return Err!(Request(Forbidden("You cannot update the profile of another user")));
-    }
-
-    let all_joined_rooms: Vec<OwnedRoomId> = services
-        .rooms
-        .state_cache
-        .rooms_joined(&body.user_id)
-        .map(ToOwned::to_owned)
-        .collect()
-        .await;
-
-    update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms)
-        .boxed()
-        .await;
-
-    if services.config.allow_local_presence {
-        // Presence update
-        services
-            .presence
-            .ping_presence(&body.user_id, &PresenceState::Online)
-            .await?;
-    }
-
-    Ok(set_display_name::v3::Response {})
-}
-
-/// # `GET /_matrix/client/v3/profile/{userId}/displayname`
-///
-/// Returns the displayname of the user.
-///
-/// - If user is on another server and we do not have a local copy already fetch
-///   displayname over federation
-pub(crate) async fn get_displayname_route(
-    State(services): State<crate::State>,
-    body: Ruma<get_display_name::v3::Request>,
-) -> Result<get_display_name::v3::Response> {
-    if !services.globals.user_is_local(&body.user_id) {
-        // Create and update our local copy of the user
-        if let Ok(response) = services
-            .sending
-            .send_federation_request(
-                body.user_id.server_name(),
-                federation::query::get_profile_information::v1::Request {
-                    user_id: body.user_id.clone(),
-                    field: None, // we want the full user's profile to update locally too
-                },
-            )
-            .await
-        {
-            if !services.users.exists(&body.user_id).await {
-                services.users.create(&body.user_id, None, None).await?;
-            }
-
-            services
-                .users
-                .set_displayname(&body.user_id, response.displayname.clone());
-            services
-                .users
-                .set_avatar_url(&body.user_id, response.avatar_url.clone());
-            services
-                .users
-                .set_blurhash(&body.user_id, response.blurhash.clone());
-
-            return Ok(get_display_name::v3::Response { displayname: response.displayname });
-        }
-    }
-
-    if !services.users.exists(&body.user_id).await {
-        // Return 404 if this user doesn't exist and we couldn't fetch it over
-        // federation
-        return Err!(Request(NotFound("Profile was not found.")));
-    }
-
-    Ok(get_display_name::v3::Response {
-        displayname: services.users.displayname(&body.user_id).await.ok(),
-    })
-}
-
-/// # `PUT /_matrix/client/v3/profile/{userId}/avatar_url`
-///
-/// Updates the `avatar_url` and `blurhash`.
-///
-/// - Also makes sure other users receive the update using presence EDUs
-pub(crate) async fn set_avatar_url_route(
-    State(services): State<crate::State>,
-    body: Ruma<set_avatar_url::v3::Request>,
-) -> Result<set_avatar_url::v3::Response> {
-    let sender_user = body.sender_user();
-    if services.users.is_suspended(sender_user).await? {
-        return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
-    }
-
-    if *sender_user != body.user_id && body.appservice_info.is_none() {
-        return Err!(Request(Forbidden("You cannot update the profile of another user")));
-    }
-
-    let all_joined_rooms: Vec<OwnedRoomId> = services
-        .rooms
-        .state_cache
-        .rooms_joined(&body.user_id)
-        .map(ToOwned::to_owned)
-        .collect()
-        .await;
-
-    update_avatar_url(
-        &services,
-        &body.user_id,
-        body.avatar_url.clone(),
-        body.blurhash.clone(),
-        &all_joined_rooms,
-    )
-    .boxed()
-    .await;
-
-    if services.config.allow_local_presence {
-        // Presence update
-        services
-            .presence
-            .ping_presence(&body.user_id, &PresenceState::Online)
-            .await
-            .ok();
-    }
-
-    Ok(set_avatar_url::v3::Response {})
-}
-
-/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url`
-///
-/// Returns the `avatar_url` and `blurhash` of the user.
-///
-/// - If user is on another server and we do not have a local copy already fetch
-///   `avatar_url` and blurhash over federation
-pub(crate) async fn get_avatar_url_route(
-    State(services): State<crate::State>,
-    body: Ruma<get_avatar_url::v3::Request>,
-) -> Result<get_avatar_url::v3::Response> {
-    if !services.globals.user_is_local(&body.user_id) {
-        // Create and update our local copy of the user
-        if let Ok(response) = services
-            .sending
-            .send_federation_request(
-                body.user_id.server_name(),
-                federation::query::get_profile_information::v1::Request {
-                    user_id: body.user_id.clone(),
-                    field: None, // we want the full user's profile to update locally as well
-                },
-            )
-            .await
-        {
-            if !services.users.exists(&body.user_id).await {
-                services.users.create(&body.user_id, None, None).await?;
-            }
-
-            services
-                .users
-                .set_displayname(&body.user_id, response.displayname.clone());
-            services
-                .users
-                .set_avatar_url(&body.user_id, response.avatar_url.clone());
-            services
-                .users
-                .set_blurhash(&body.user_id, response.blurhash.clone());
-
-            return Ok(get_avatar_url::v3::Response {
-                avatar_url: response.avatar_url,
-                blurhash: response.blurhash,
-            });
-        }
-    }
-
-    if !services.users.exists(&body.user_id).await {
-        // Return 404 if this user doesn't exist and we couldn't fetch it over
-        // federation
-        return Err!(Request(NotFound("Profile was not found.")));
-    }
-
-    let (avatar_url, blurhash) = join(
-        services.users.avatar_url(&body.user_id).ok(),
-        services.users.blurhash(&body.user_id).ok(),
-    )
-    .await;
-
-    Ok(get_avatar_url::v3::Response { avatar_url, blurhash })
-}
-
 /// # `GET /_matrix/client/v3/profile/{userId}`
 ///
 /// Returns the displayname, avatar_url, blurhash, and custom profile fields of
@@ -235,188 +32,347 @@ pub(crate) async fn get_profile_route(
     State(services): State<crate::State>,
     body: Ruma<get_profile::v3::Request>,
 ) -> Result<get_profile::v3::Response> {
-    if !services.globals.user_is_local(&body.user_id) {
-        // Create and update our local copy of the user
-        if let Ok(response) = services
-            .sending
-            .send_federation_request(
-                body.user_id.server_name(),
-                federation::query::get_profile_information::v1::Request {
-                    user_id: body.user_id.clone(),
-                    field: None,
-                },
-            )
-            .await
-        {
-            if !services.users.exists(&body.user_id).await {
-                services.users.create(&body.user_id, None, None).await?;
-            }
-
-            services
-                .users
-                .set_displayname(&body.user_id, response.displayname.clone());
-            services
-                .users
-                .set_avatar_url(&body.user_id, response.avatar_url.clone());
-            services
-                .users
-                .set_blurhash(&body.user_id, response.blurhash.clone());
-
-            for (profile_key, profile_key_value) in &response.custom_profile_fields {
-                services.users.set_profile_key(
-                    &body.user_id,
-                    profile_key,
-                    Some(profile_key_value.clone()),
-                );
-            }
-
-            return Ok(get_profile::v3::Response {
-                displayname: response.displayname,
-                avatar_url: response.avatar_url,
-                blurhash: response.blurhash,
-                custom_profile_fields: response.custom_profile_fields,
-            });
-        }
-    }
-
-    if !services.users.exists(&body.user_id).await {
-        // Return 404 if this user doesn't exist and we couldn't fetch it over
-        // federation
-        return Err!(Request(NotFound("Profile was not found.")));
-    }
-
-    let (avatar_url, blurhash, displayname, custom_profile_fields) = join4(
-        services.users.avatar_url(&body.user_id).ok(),
-        services.users.blurhash(&body.user_id).ok(),
-        services.users.displayname(&body.user_id).ok(),
-        services.users.all_profile_keys(&body.user_id).collect(),
-    )
-    .await;
-
-    Ok(get_profile::v3::Response {
-        avatar_url,
-        blurhash,
-        displayname,
-        custom_profile_fields,
-    })
-}
-
-pub async fn update_displayname(
-    services: &Services,
-    user_id: &UserId,
-    displayname: Option<String>,
-    all_joined_rooms: &[OwnedRoomId],
-) {
-    let (current_avatar_url, current_blurhash, current_displayname) = join3(
-        services.users.avatar_url(user_id).ok(),
-        services.users.blurhash(user_id).ok(),
-        services.users.displayname(user_id).ok(),
-    )
-    .await;
-
-    if displayname == current_displayname {
-        return;
-    }
-
-    services.users.set_displayname(user_id, displayname.clone());
-
-    // Send a new join membership event into all joined rooms
-    let avatar_url = &current_avatar_url;
-    let blurhash = &current_blurhash;
-    let displayname = &displayname;
-    let all_joined_rooms: Vec<_> = all_joined_rooms
-        .iter()
-        .try_stream()
-        .and_then(|room_id: &OwnedRoomId| async move {
-            let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
-                displayname: displayname.clone(),
-                membership: MembershipState::Join,
-                avatar_url: avatar_url.clone(),
-                blurhash: blurhash.clone(),
-                join_authorized_via_users_server: None,
-                reason: None,
-                is_direct: None,
-                third_party_invite: None,
-                redact_events: None,
-            });
-
-            Ok((pdu, room_id))
-        })
-        .ignore_err()
-        .collect()
-        .await;
-
-    update_all_rooms(services, all_joined_rooms, user_id)
-        .boxed()
-        .await;
-}
-
-pub async fn update_avatar_url(
-    services: &Services,
-    user_id: &UserId,
-    avatar_url: Option<OwnedMxcUri>,
-    blurhash: Option<String>,
-    all_joined_rooms: &[OwnedRoomId],
-) {
-    let (current_avatar_url, current_blurhash, current_displayname) = join3(
-        services.users.avatar_url(user_id).ok(),
-        services.users.blurhash(user_id).ok(),
-        services.users.displayname(user_id).ok(),
-    )
-    .await;
-
-    if current_avatar_url == avatar_url && current_blurhash == blurhash {
-        return;
-    }
-
-    services.users.set_avatar_url(user_id, avatar_url.clone());
-    services.users.set_blurhash(user_id, blurhash.clone());
-
-    // Send a new join membership event into all joined rooms
-    let avatar_url = &avatar_url;
-    let blurhash = &blurhash;
-    let displayname = &current_displayname;
-    let all_joined_rooms: Vec<_> = all_joined_rooms
-        .iter()
-        .try_stream()
-        .and_then(|room_id: &OwnedRoomId| async move {
-            let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
-                avatar_url: avatar_url.clone(),
-                blurhash: blurhash.clone(),
-                membership: MembershipState::Join,
-                displayname: displayname.clone(),
-                join_authorized_via_users_server: None,
-                reason: None,
-                is_direct: None,
-                third_party_invite: None,
-                redact_events: None,
-            });
-
-            Ok((pdu, room_id))
-        })
-        .ignore_err()
-        .collect()
-        .await;
-
-    update_all_rooms(services, all_joined_rooms, user_id)
-        .boxed()
-        .await;
-}
-
-pub async fn update_all_rooms(
-    services: &Services,
-    all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>,
-    user_id: &UserId,
-) {
-    for (pdu_builder, room_id) in all_joined_rooms {
-        let state_lock = services.rooms.state.mutex.lock(room_id).await;
-        if let Err(e) = services
-            .rooms
-            .timeline
-            .build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
-            .await
-        {
-            warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");
-        }
-    }
-}
+    let Some(profile) = fetch_full_profile(&services, &body.user_id).await else {
+        return Err!(Request(NotFound("This user's profile could not be fetched.")));
+    };
+
+    Ok(get_profile::v3::Response::from_iter(profile))
+}
+
+pub(crate) async fn get_profile_field_route(
+    State(services): State<crate::State>,
+    body: Ruma<get_profile_field::v3::Request>,
+) -> Result<get_profile_field::v3::Response> {
+    let value = fetch_profile_field(&services, &body.user_id, body.field.clone()).await?;
+
+    Ok(assign!(get_profile_field::v3::Response::default(), { value }))
+}
+
+pub(crate) async fn set_profile_field_route(
+    State(services): State<crate::State>,
+    body: Ruma<set_profile_field::v3::Request>,
+) -> Result<set_profile_field::v3::Response> {
+    if body.user_id != body.sender_user()
+        && !(body.appservice_info.is_some()
+            || services.admin.user_is_admin(body.sender_user()).await)
+    {
+        return Err!(Request(Forbidden("You may not change other users' profile data.")));
+    }
+
+    if !services.globals.user_is_local(&body.user_id) {
+        return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
+    }
+
+    set_profile_field(&services, &body.user_id, ProfileFieldChange::Set(body.value.clone()))
+        .await?;
+
+    Ok(set_profile_field::v3::Response::new())
+}
+
+pub(crate) async fn delete_profile_field_route(
+    State(services): State<crate::State>,
+    body: Ruma<delete_profile_field::v3::Request>,
+) -> Result<delete_profile_field::v3::Response> {
+    if body.user_id != body.sender_user()
+        && !(body.appservice_info.is_some()
+            || services.admin.user_is_admin(body.sender_user()).await)
+    {
+        return Err!(Request(Forbidden("You may not change other users' profile data.")));
+    }
+
+    if !services.globals.user_is_local(&body.user_id) {
+        return Err!(Request(InvalidParam("You may not change a remote user's profile data.")));
+    }
+
+    set_profile_field(&services, &body.user_id, ProfileFieldChange::Delete(body.field.clone()))
+        .await?;
+
+    Ok(delete_profile_field::v3::Response::new())
+}
+
+async fn fetch_full_profile(
+    services: &Services,
+    user_id: &UserId,
+) -> Option<BTreeMap<String, Value>> {
+    // If the user exists locally, fetch their local profile
+    if services.users.exists(user_id).await {
+        return Some(get_local_profile(services, user_id).await);
+    }
+
+    // Otherwise ask their homeserver
+    let Ok(response) = services
+        .sending
+        .send_federation_request(
+            user_id.server_name(),
+            federation::query::get_profile_information::v1::Request::new(user_id.to_owned()),
+        )
+        .await
+    else {
+        return None;
+    };
+
+    // Update our local copies of their profile fields
+    services.users.clear_profile(user_id).await;
+    for (field, value) in response.iter() {
+        let Ok(value) = ProfileFieldValue::new(field, value.to_owned()) else {
+            // Skip malformed fields
+            continue;
+        };
+        let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value)).await;
+    }
+
+    Some(BTreeMap::from_iter(response))
+}
+
+async fn fetch_profile_field(
+    services: &Services,
+    user_id: &UserId,
+    field: ProfileFieldName,
+) -> Result<Option<ProfileFieldValue>> {
+    // If the user exists locally, fetch their local profile field
+    if services.globals.user_is_local(user_id) {
+        return Ok(get_local_profile_field(services, user_id, field).await);
+    }
+
+    // Otherwise ask their homeserver
+    let Ok(response) = services
+        .sending
+        .send_federation_request(
+            user_id.server_name(),
+            assign!(federation::query::get_profile_information::v1::Request::new(user_id.to_owned()), {
+                field: Some(field.clone())
+            }),
+        )
+        .await
+    else {
+        return Err!(Request(NotFound(
+            "User's homeserver could not provide this profile field."
+        )));
+    };
+
+    if let Some(value) = response.get(field.as_str()).map(ToOwned::to_owned) {
+        if let Ok(value) = ProfileFieldValue::new(field.as_str(), value) {
+            let _ = set_profile_field(services, user_id, ProfileFieldChange::Set(value.clone()))
+                .await;
+
+            Ok(Some(value))
+        } else {
+            Err!(Request(Unknown(
+                "User's homeserver returned malformed data for this profile field."
+            )))
+        }
+    } else {
+        let _ = set_profile_field(services, user_id, ProfileFieldChange::Delete(field)).await;
+
+        Ok(None)
+    }
+}
+
+pub(crate) async fn get_local_profile(
+    services: &Services,
+    user_id: &UserId,
+) -> BTreeMap<String, Value> {
+    let mut profile = BTreeMap::new();
+
+    // Get displayname and avatar_url independently because `all_profile_keys`
+    // doesn't include them
+    for field in [ProfileFieldName::AvatarUrl, ProfileFieldName::DisplayName] {
+        let key = field.as_str().to_owned();
+        if let Some(value) = get_local_profile_field(services, user_id, field).await {
+            profile.insert(key, value.value().into_owned());
+        }
+    }
+
+    // Insert all other profile fields
+    let mut all_fields = services.users.all_profile_keys(user_id);
+    while let Some((key, value)) = all_fields.next().await {
+        profile.insert(key, value);
+    }
+
+    profile
+}
+
+pub(crate) async fn get_local_profile_field(
+    services: &Services,
+    user_id: &UserId,
+    field: ProfileFieldName,
+) -> Option<ProfileFieldValue> {
+    let value = match field.clone() {
+        | ProfileFieldName::AvatarUrl => services
+            .users
+            .avatar_url(user_id)
+            .await
+            .ok()
+            .map(to_value)
+            .transpose()
+            .expect("converting avatar url to value should succeed"),
+        | ProfileFieldName::DisplayName => services
+            .users
+            .displayname(user_id)
+            .await
+            .ok()
+            .map(to_value)
+            .transpose()
+            .expect("converting displayname to value should succeed"),
+        | other => services
+            .users
+            .profile_key(user_id, other.as_str())
+            .await
+            .ok(),
+    }?;
+
+    Some(
+        ProfileFieldValue::new(field.as_str(), value)
+            .expect("local profile field should be valid"),
+    )
+}
+
+enum ProfileFieldChange {
+    Set(ProfileFieldValue),
+    Delete(ProfileFieldName),
+}
+
+impl ProfileFieldChange {
+    fn field_name(&self) -> ProfileFieldName {
+        match self {
+            | &Self::Delete(ref name) => name.clone(),
+            | &Self::Set(ref value) => value.field_name(),
+        }
+    }
+
+    fn value(&self) -> Option<Value> {
+        if let Self::Set(value) = self {
+            Some(value.value().into_owned())
+        } else {
+            None
+        }
+    }
+}
+
+async fn set_profile_field(
+    services: &Services,
+    user_id: &UserId,
+    change: ProfileFieldChange,
+) -> Result<()> {
+    const MAX_KEY_LENGTH_BYTES: usize = 255;
+    const MAX_PROFILE_LENGTH_BYTES: usize = 65536;
+
+    let field_name = change.field_name();
+
+    // TODO: The spec mentions special error codes (M_PROFILE_TOO_LARGE,
+    // M_KEY_TOO_LARGE) for profile field size limits, but they're not in its list
+    // of error codes and Ruma doesn't have them. Should we return those, or is
+    // M_TOO_LARGE okay?
+    if field_name.as_str().len() > MAX_KEY_LENGTH_BYTES {
+        return Err!(Request(TooLarge(
+            "Individual profile keys must not exceed {MAX_KEY_LENGTH_BYTES} bytes in length."
+        )));
+    }
+
+    // Serialize the entire profile as canonical JSON, including the new change,
+    // to check if it exceeds 64 KiB
+    {
+        let mut full_profile = get_local_profile(services, user_id).await;
+        match &change {
+            | ProfileFieldChange::Set(value) => {
+                full_profile.insert(
+                    value.field_name().as_str().to_owned(),
+                    value.value().clone().into_owned(),
+                );
+            },
+            | ProfileFieldChange::Delete(key) => {
+                full_profile.remove(key.as_str());
+            },
+        }
+
+        if let Ok(canonical_profile) = to_canonical_object(full_profile) {
+            if serde_json::to_string(&canonical_profile)
+                .expect("should be able to serialize to string")
+                .len() > MAX_PROFILE_LENGTH_BYTES
+            {
+                return Err!(
+                    "Profile data must not exceed {MAX_PROFILE_LENGTH_BYTES} bytes in length."
+                );
+            }
+        } else {
+            return Err!(Request(BadJson("Failed to canonicalize profile.")));
+        }
+    }
+
+    match change {
+        | ProfileFieldChange::Set(ProfileFieldValue::DisplayName(displayname)) => {
+            services
+                .users
+                .set_displayname(user_id, Some(displayname).filter(|dn| !dn.is_empty()));
+        },
+        | ProfileFieldChange::Set(ProfileFieldValue::AvatarUrl(avatar_url)) => {
+            services
+                .users
+                .set_avatar_url(user_id, Some(avatar_url).filter(|av| av.is_valid()));
+        },
+        | ProfileFieldChange::Delete(ProfileFieldName::DisplayName) => {
+            services.users.set_displayname(user_id, None);
+        },
+        | ProfileFieldChange::Delete(ProfileFieldName::AvatarUrl) => {
+            services.users.set_avatar_url(user_id, None);
+        },
+        | other =>
+            if other.field_name().as_str() == "blurhash" {
+                if let Some(Value::String(blurhash)) = other.value() {
+                    services.users.set_blurhash(user_id, Some(blurhash));
+                } else {
+                    services.users.set_blurhash(user_id, None);
+                }
+            } else {
+                services.users.set_profile_key(
+                    user_id,
+                    other.field_name().as_str(),
+                    other.value(),
+                );
+            },
+    }
+
+    // If the user is local and changed their displayname or avatar_url, update it
+    // in all their joined rooms
+    if matches!(field_name, ProfileFieldName::AvatarUrl | ProfileFieldName::DisplayName)
+        && services.users.is_active_local(user_id).await
+    {
+        let displayname = services.users.displayname(user_id).await.ok();
+        let avatar_url = services.users.avatar_url(user_id).await.ok();
+        let membership_content = assign!(
+            RoomMemberEventContent::new(MembershipState::Join), { displayname, avatar_url }
+        );
+
+        let mut all_joined_rooms = services.rooms.state_cache.rooms_joined(user_id);
+
+        while let Some(room_id) = all_joined_rooms.next().await {
+            let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;
+
+            let _ = services
+                .rooms
+                .timeline
+                .build_and_append_pdu(
+                    PartialPdu::state(user_id.to_string(), &membership_content),
+                    user_id,
+                    Some(&room_id),
+                    &state_lock,
+                )
+                .await;
+        }
+
+        if services.config.allow_local_presence {
+            // Send a presence EDU to indicate the profile changed
+            let _ = services
+                .presence
+                .ping_presence(user_id, &PresenceState::Online)
+                .await;
+        }
+    }
+
+    Ok(())
+}
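Note: set_profile_field above bounds the whole profile at 64 KiB by serializing the canonicalized map. A minimal sketch of that check, assuming plain serde_json serialization of an already-ordered BTreeMap approximates the canonical-JSON length:

use std::collections::BTreeMap;

const MAX_PROFILE_LENGTH_BYTES: usize = 65536;

// Serialize the (canonically ordered) map and compare the byte length.
fn profile_fits(profile: &BTreeMap<String, serde_json::Value>) -> bool {
    serde_json::to_string(profile)
        .map(|s| s.len() <= MAX_PROFILE_LENGTH_BYTES)
        .unwrap_or(false)
}

fn main() {
    let mut profile = BTreeMap::new();
    profile.insert("displayname".to_owned(), serde_json::json!("Alice"));
    assert!(profile_fits(&profile));
}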
+36 -42
@@ -3,13 +3,13 @@
 use conduwuit_service::Services;
 use ruma::{
     CanonicalJsonObject, CanonicalJsonValue,
-    api::client::{
-        error::ErrorKind,
-        push::{
+    api::{
+        client::push::{
             delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions,
             get_pushrule_enabled, get_pushrules_all, get_pushrules_global_scope, set_pusher,
             set_pushrule, set_pushrule_actions, set_pushrule_enabled,
         },
+        error::ErrorKind,
     },
     events::{
         GlobalAccountDataEventType,
@@ -80,9 +80,7 @@ pub(crate) async fn get_pushrules_all_route(
             global_ruleset.update_with_server_default(Ruleset::server_default(sender_user));

             let ty = GlobalAccountDataEventType::PushRules;
-            let event = PushRulesEvent {
-                content: PushRulesEventContent { global: global_ruleset.clone() },
-            };
+            let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));

             services
                 .account_data
@@ -91,7 +89,7 @@ pub(crate) async fn get_pushrules_all_route(
         }
     };

-    Ok(get_pushrules_all::v3::Response { global: global_ruleset })
+    Ok(get_pushrules_all::v3::Response::new(global_ruleset))
 }

 /// # `GET /_matrix/client/r0/pushrules/global/`
@@ -116,21 +114,20 @@ pub(crate) async fn get_pushrules_global_route(
         // user somehow has non-existent push rule event. recreate it and return server
         // default silently
-        let ty = GlobalAccountDataEventType::PushRules;
-        let event = PushRulesEvent {
-            content: PushRulesEventContent {
-                global: Ruleset::server_default(sender_user),
-            },
-        };
+        let global_ruleset = Ruleset::server_default(sender_user);
+        let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));

         services
             .account_data
-            .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
+            .update(
+                None,
+                sender_user,
+                GlobalAccountDataEventType::PushRules.to_string().into(),
+                &serde_json::to_value(event)?,
+            )
             .await?;

-        return Ok(get_pushrules_global_scope::v3::Response {
-            global: Ruleset::server_default(sender_user),
-        });
+        return Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset));
     };

     let account_data_content =
@@ -173,16 +170,16 @@ pub(crate) async fn get_pushrules_global_route(
                 None,
                 sender_user,
                 GlobalAccountDataEventType::PushRules.to_string().into(),
-                &serde_json::to_value(PushRulesEvent {
-                    content: PushRulesEventContent { global: global_ruleset.clone() },
-                })
+                &serde_json::to_value(PushRulesEvent::new(PushRulesEventContent::new(
+                    global_ruleset.clone(),
+                )))
                 .expect("to json always works"),
             )
             .await?;
         }
     };

-    Ok(get_pushrules_global_scope::v3::Response { global: global_ruleset })
+    Ok(get_pushrules_global_scope::v3::Response::new(global_ruleset))
 }

 /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
@@ -216,7 +213,7 @@ pub(crate) async fn get_pushrule_route(
         .map(Into::into);

     if let Some(rule) = rule {
-        Ok(get_pushrule::v3::Response { rule })
+        Ok(get_pushrule::v3::Response::new(rule))
     } else {
         Err!(Request(NotFound("Push rule not found.")))
     }
@@ -275,7 +272,7 @@ pub(crate) async fn set_pushrule_route(
         .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
         .await?;

-    Ok(set_pushrule::v3::Response {})
+    Ok(set_pushrule::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
@@ -309,7 +306,7 @@ pub(crate) async fn get_pushrule_actions_route(
         .map(|rule| rule.actions().to_owned())
         .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;

-    Ok(get_pushrule_actions::v3::Response { actions })
+    Ok(get_pushrule_actions::v3::Response::new(actions))
 }

 /// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
@@ -342,7 +339,7 @@ pub(crate) async fn set_pushrule_actions_route(
         .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
         .await?;

-    Ok(set_pushrule_actions::v3::Response {})
+    Ok(set_pushrule_actions::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
@@ -360,7 +357,7 @@ pub(crate) async fn get_pushrule_enabled_route(
         || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str()
         || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str()
     {
-        return Ok(get_pushrule_enabled::v3::Response { enabled: false });
+        return Ok(get_pushrule_enabled::v3::Response::new(false));
     }

     let event: PushRulesEvent = services
@@ -376,7 +373,7 @@ pub(crate) async fn get_pushrule_enabled_route(
         .map(ruma::push::AnyPushRuleRef::enabled)
         .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?;

-    Ok(get_pushrule_enabled::v3::Response { enabled })
+    Ok(get_pushrule_enabled::v3::Response::new(enabled))
 }

 /// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
@@ -409,7 +406,7 @@ pub(crate) async fn set_pushrule_enabled_route(
         .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
         .await?;

-    Ok(set_pushrule_enabled::v3::Response {})
+    Ok(set_pushrule_enabled::v3::Response::new())
 }

 /// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
@@ -451,7 +448,7 @@ pub(crate) async fn delete_pushrule_route(
         .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
         .await?;

-    Ok(delete_pushrule::v3::Response {})
+    Ok(delete_pushrule::v3::Response::new())
 }

 /// # `GET /_matrix/client/r0/pushers`
@@ -463,9 +460,7 @@ pub(crate) async fn get_pushers_route(
 ) -> Result<get_pushers::v3::Response> {
     let sender_user = body.sender_user();

-    Ok(get_pushers::v3::Response {
-        pushers: services.pusher.get_pushers(sender_user).await,
-    })
+    Ok(get_pushers::v3::Response::new(services.pusher.get_pushers(sender_user).await))
 }

 /// # `POST /_matrix/client/r0/pushers/set`
@@ -493,19 +488,18 @@ pub async fn recreate_push_rules_and_return(
     services: &Services,
     sender_user: &ruma::UserId,
 ) -> Result<get_pushrules_all::v3::Response> {
-    let ty = GlobalAccountDataEventType::PushRules;
-    let event = PushRulesEvent {
-        content: PushRulesEventContent {
-            global: Ruleset::server_default(sender_user),
-        },
-    };
+    let global_ruleset = Ruleset::server_default(sender_user);
+    let event = PushRulesEvent::new(PushRulesEventContent::new(global_ruleset.clone()));

     services
         .account_data
-        .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
+        .update(
+            None,
+            sender_user,
+            GlobalAccountDataEventType::PushRules.to_string().into(),
+            &serde_json::to_value(event)?,
+        )
         .await?;

-    Ok(get_pushrules_all::v3::Response {
-        global: Ruleset::server_default(sender_user),
-    })
+    Ok(get_pushrules_all::v3::Response::new(global_ruleset))
 }
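Note: recreate_push_rules_and_return now builds the server-default ruleset once, persists it, and returns the same value instead of computing it twice. A sketch of that shape with stand-in types (the real code uses ruma's Ruleset and the account_data service):

#[derive(Clone, Debug)]
struct Ruleset {
    owner: String,
}

fn server_default(user: &str) -> Ruleset {
    Ruleset { owner: user.to_owned() }
}

fn persist(_event: &Ruleset) {
    // account_data.update(...) in the real handler
}

fn main() {
    let global_ruleset = server_default("@alice:example.org");
    persist(&global_ruleset);
    // Returned directly; no second server_default() call.
    let _response = global_ruleset;
}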
+23 -33
@@ -1,14 +1,15 @@
 use std::collections::BTreeMap;

 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{Err, PduCount, Result, err};
 use ruma::{
     MilliSecondsSinceUnixEpoch,
     api::client::{read_marker::set_read_marker, receipt::create_receipt},
     events::{
         RoomAccountDataEventType,
-        receipt::{ReceiptThread, ReceiptType},
+        fully_read::{FullyReadEvent, FullyReadEventContent},
+        receipt::{Receipt, ReceiptEvent, ReceiptEventContent, ReceiptType},
     },
 };
@@ -28,9 +29,7 @@ pub(crate) async fn set_read_marker_route(
     let sender_user = body.sender_user();

     if let Some(event) = &body.fully_read {
-        let fully_read_event = ruma::events::fully_read::FullyReadEvent {
-            content: ruma::events::fully_read::FullyReadEventContent { event_id: event.clone() },
-        };
+        let fully_read_event = FullyReadEvent::new(FullyReadEventContent::new(event.to_owned()));

         services
             .account_data
@@ -62,19 +61,16 @@ pub(crate) async fn set_read_marker_route(
     if services.config.allow_local_read_receipts
         && !services.users.is_suspended(sender_user).await?
     {
-        let receipt_content = BTreeMap::from_iter([(
+        let receipt_content = [(
             event.to_owned(),
             BTreeMap::from_iter([(
                 ReceiptType::Read,
                 BTreeMap::from_iter([(
                     sender_user.to_owned(),
-                    ruma::events::receipt::Receipt {
-                        ts: Some(MilliSecondsSinceUnixEpoch::now()),
-                        thread: ReceiptThread::Unthreaded,
-                    },
+                    Receipt::new(MilliSecondsSinceUnixEpoch::now()),
                 )]),
             )]),
-        )]);
+        )];

         services
             .rooms
@@ -82,10 +78,10 @@ pub(crate) async fn set_read_marker_route(
             .readreceipt_update(
                 sender_user,
                 &body.room_id,
-                &ruma::events::receipt::ReceiptEvent {
-                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
-                    room_id: body.room_id.clone(),
-                },
+                &ReceiptEvent::new(
+                    body.room_id.clone(),
+                    ReceiptEventContent::from_iter(receipt_content),
+                ),
             )
             .await;
     }
@@ -111,7 +107,7 @@ pub(crate) async fn set_read_marker_route(
             .private_read_set(&body.room_id, sender_user, count);
     }

-    Ok(set_read_marker::v3::Response {})
+    Ok(set_read_marker::v3::Response::new())
 }

 /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
@@ -119,7 +115,7 @@ pub(crate) async fn set_read_marker_route(
 /// Sets private read marker and public read receipt EDU.
 pub(crate) async fn create_receipt_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client_ip): InsecureClientIp,
+    ClientIp(client_ip): ClientIp,
     body: Ruma<create_receipt::v3::Request>,
 ) -> Result<create_receipt::v3::Response> {
     let sender_user = body.sender_user();
@@ -148,11 +144,8 @@ pub(crate) async fn create_receipt_route(
     match body.receipt_type {
         | create_receipt::v3::ReceiptType::FullyRead => {
-            let fully_read_event = ruma::events::fully_read::FullyReadEvent {
-                content: ruma::events::fully_read::FullyReadEventContent {
-                    event_id: body.event_id.clone(),
-                },
-            };
+            let fully_read_event =
+                FullyReadEvent::new(FullyReadEventContent::new(body.event_id.clone()));
             services
                 .account_data
                 .update(
@@ -164,19 +157,16 @@ pub(crate) async fn create_receipt_route(
                 .await?;
         },
         | create_receipt::v3::ReceiptType::Read => {
-            let receipt_content = BTreeMap::from_iter([(
+            let receipt_content = [(
                 body.event_id.clone(),
                 BTreeMap::from_iter([(
                     ReceiptType::Read,
                     BTreeMap::from_iter([(
                         sender_user.to_owned(),
-                        ruma::events::receipt::Receipt {
-                            ts: Some(MilliSecondsSinceUnixEpoch::now()),
-                            thread: ReceiptThread::Unthreaded,
-                        },
+                        Receipt::new(MilliSecondsSinceUnixEpoch::now()),
                     )]),
                 )]),
-            )]);
+            )];
             services
                 .rooms
@@ -184,10 +174,10 @@ pub(crate) async fn create_receipt_route(
                 .readreceipt_update(
                     sender_user,
                     &body.room_id,
-                    &ruma::events::receipt::ReceiptEvent {
-                        content: ruma::events::receipt::ReceiptEventContent(receipt_content),
-                        room_id: body.room_id.clone(),
-                    },
+                    &ReceiptEvent::new(
+                        body.room_id.clone(),
+                        ReceiptEventContent::from_iter(receipt_content),
+                    ),
                 )
                 .await;
         },
@@ -218,5 +208,5 @@ pub(crate) async fn create_receipt_route(
         },
     }

-    Ok(create_receipt::v3::Response {})
+    Ok(create_receipt::v3::Response::new())
 }
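Note: both receipt paths above build the same nested payload, event ID to receipt type to user ID to receipt. A runnable sketch of that shape, with plain strings standing in for ruma's typed identifiers:

use std::collections::BTreeMap;

fn main() {
    // event id -> receipt type -> user id -> timestamp (ms since the epoch)
    let receipt_content = BTreeMap::from_iter([(
        "$event:example.org".to_owned(),
        BTreeMap::from_iter([(
            "m.read".to_owned(),
            BTreeMap::from_iter([("@user:example.org".to_owned(), 1_700_000_000_000_u64)]),
        )]),
    )]);

    println!("{receipt_content:?}");
}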
+12 -11
@@ -1,8 +1,8 @@
 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
-use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
+use axum_client_ip::ClientIp;
+use conduwuit::{Err, Result, matrix::pdu::PartialPdu};
 use ruma::{
-    api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
+    api::client::redact::redact_event, assign, events::room::redaction::RoomRedactionEventContent,
 };

 use crate::Ruma;
@@ -14,7 +14,7 @@
 /// - TODO: Handle txn id
 pub(crate) async fn redact_event_route(
     State(services): State<crate::State>,
-    InsecureClientIp(client_ip): InsecureClientIp,
+    ClientIp(client_ip): ClientIp,
     body: Ruma<redact_event::v3::Request>,
 ) -> Result<redact_event::v3::Response> {
     let sender_user = body.sender_user();
@@ -28,18 +28,19 @@ pub(crate) async fn redact_event_route(
         return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
     }

-    let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
+    let state_lock = services.rooms.state.mutex.lock(body.room_id.as_str()).await;
     let event_id = services
         .rooms
         .timeline
         .build_and_append_pdu(
-            PduBuilder {
+            PartialPdu {
                 redacts: Some(body.event_id.clone()),
-                ..PduBuilder::timeline(&RoomRedactionEventContent {
-                    redacts: Some(body.event_id.clone()),
-                    reason: body.reason.clone(),
-                })
+                ..PartialPdu::timeline(
+                    &assign!(RoomRedactionEventContent::new_v11(body.event_id.clone()), {
+                        reason: body.reason.clone()
+                    }),
+                )
             },
             sender_user,
             Some(&body.room_id),
@@ -49,5 +50,5 @@ pub(crate) async fn redact_event_route(
     drop(state_lock);

-    Ok(redact_event::v3::Response { event_id })
+    Ok(redact_event::v3::Response::new(event_id))
 }
+9 -7
@@ -15,6 +15,7 @@
             get_relating_events_with_rel_type_and_event_type,
         },
     },
+    assign,
     events::{TimelineEventType, relation::RelationType},
 };
@@ -39,12 +40,13 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
         body.dir,
     )
     .await
-    .map(|res| get_relating_events_with_rel_type_and_event_type::v1::Response {
-        chunk: res.chunk,
+    .map(|res| {
+        assign!(get_relating_events_with_rel_type_and_event_type::v1::Response::new(res.chunk), {
             next_batch: res.next_batch,
             prev_batch: res.prev_batch,
             recursion_depth: res.recursion_depth,
-    })
+        })
+    })
 }

 /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}`
@@ -66,12 +68,13 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
         body.dir,
     )
     .await
-    .map(|res| get_relating_events_with_rel_type::v1::Response {
-        chunk: res.chunk,
+    .map(|res| {
+        assign!(get_relating_events_with_rel_type::v1::Response::new(res.chunk), {
             next_batch: res.next_batch,
             prev_batch: res.prev_batch,
             recursion_depth: res.recursion_depth,
-    })
+        })
+    })
 }

 /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}`
@@ -201,12 +204,11 @@ async fn paginate_relations_with_filter(
         .map(Event::into_format)
         .collect();

-    Ok(get_relating_events::v1::Response {
+    Ok(assign!(get_relating_events::v1::Response::new(chunk), {
         next_batch,
         prev_batch: from.map(Into::into),
         recursion_depth: recurse.then_some(depth.into()),
-        chunk,
-    })
+    }))
 }

 async fn visibility_filter<Pdu: Event + Send + Sync>(
+36 -42
@@ -1,13 +1,13 @@
 use std::{fmt::Write as _, time::Duration};

 use axum::extract::State;
-use axum_client_ip::InsecureClientIp;
+use axum_client_ip::ClientIp;
 use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
 use conduwuit_service::Services;
 use ruma::{
 	EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
 	api::client::{
-		report_user,
+		reporting::report_user,
 		room::{report_content, report_room},
 	},
 	events::{Mentions, room::message::RoomMessageEventContent},
@@ -22,16 +22,18 @@ struct Report {
 	event_id: Option<OwnedEventId>,
 	user_id: Option<OwnedUserId>,
 	report_type: String,
-	reason: Option<String>,
+	reason: String,
 }

+const MAX_REASON_LENGTH: usize = 2000;
+
 /// # `POST /_matrix/client/v3/rooms/{roomId}/report`
 ///
 /// Reports an abusive room to homeserver admins
 #[tracing::instrument(skip_all, fields(%client), name = "report_room", level = "info")]
 pub(crate) async fn report_room_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<report_room::v3::Request>,
 ) -> Result<report_room::v3::Response> {
 	let sender_user = body.sender_user();
@@ -39,10 +41,10 @@ pub(crate) async fn report_room_route(
 		return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
 	}

-	if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
-		return Err!(Request(
-			InvalidParam("Reason too long, should be 750 characters or fewer",)
-		));
+	if body.reason.len() > MAX_REASON_LENGTH {
+		return Err!(Request(InvalidParam(
+			"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
+		)));
 	}

 	delay_response().await;
@@ -52,8 +54,7 @@ pub(crate) async fn report_room_route(
 	// their discretion.
 	info!(
 		"Received room report by user {sender_user} for room {} with reason: \"{}\"",
-		body.room_id,
-		body.reason.as_deref().unwrap_or("")
+		body.room_id, body.reason
 	);

 	if !services
@@ -78,7 +79,7 @@ pub(crate) async fn report_room_route(
 	services.admin.send_message(build_report(report)).await.ok();

-	Ok(report_room::v3::Response {})
+	Ok(report_room::v3::Response::new())
 }

 /// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`
@@ -87,7 +88,7 @@ pub(crate) async fn report_room_route(
 #[tracing::instrument(skip_all, fields(%client), name = "report_event", level = "info")]
 pub(crate) async fn report_event_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<report_content::v3::Request>,
 ) -> Result<report_content::v3::Response> {
 	// user authentication
@@ -98,26 +99,22 @@ pub(crate) async fn report_event_route(
 	delay_response().await;

+	let reason = body
+		.reason
+		.clone()
+		.unwrap_or_else(|| "<no reason provided>".to_owned());
+
 	// check if we know about the reported event ID or if it's invalid
 	let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else {
 		return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid")));
 	};

-	is_event_report_valid(
-		&services,
-		&pdu.event_id,
-		&body.room_id,
-		sender_user,
-		body.reason.as_ref(),
-		&pdu,
-	)
+	is_event_report_valid(&services, &pdu.event_id, &body.room_id, sender_user, &reason, &pdu)
 	.await?;

 	info!(
 		"Received event report by user {sender_user} for room {} and event ID {}, with reason: \
 		 \"{}\"",
-		body.room_id,
-		body.event_id,
-		body.reason.as_deref().unwrap_or("")
+		body.room_id, body.event_id, reason
 	);

 	let report = Report {
 		sender: sender_user.to_owned(),
@@ -125,17 +122,17 @@ pub(crate) async fn report_event_route(
 		event_id: Some(body.event_id.clone()),
 		user_id: None,
 		report_type: "event".to_owned(),
-		reason: body.reason.clone(),
+		reason,
 	};

 	services.admin.send_message(build_report(report)).await.ok();

-	Ok(report_content::v3::Response {})
+	Ok(report_content::v3::Response::new())
 }

 #[tracing::instrument(skip_all, fields(%client), name = "report_user", level = "info")]
 pub(crate) async fn report_user_route(
 	State(services): State<crate::State>,
-	InsecureClientIp(client): InsecureClientIp,
+	ClientIp(client): ClientIp,
 	body: Ruma<report_user::v3::Request>,
 ) -> Result<report_user::v3::Response> {
 	// user authentication
@@ -144,17 +141,17 @@ pub(crate) async fn report_user_route(
 		return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
 	}

-	if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
-		return Err!(Request(
-			InvalidParam("Reason too long, should be 750 characters or fewer",)
-		));
+	if body.reason.len() > MAX_REASON_LENGTH {
+		return Err!(Request(InvalidParam(
+			"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
+		)));
 	}

 	delay_response().await;

 	if !services.users.is_active_local(&body.user_id).await {
 		// return 200 as to not reveal if the user exists. Recommended by spec.
-		return Ok(report_user::v3::Response {});
+		return Ok(report_user::v3::Response::new());
 	}

 	let report = Report {
@@ -168,13 +165,12 @@ pub(crate) async fn report_user_route(
 	info!(
 		"Received room report from {sender_user} for user {} with reason: \"{}\"",
-		body.user_id,
-		body.reason.as_deref().unwrap_or("")
+		body.user_id, body.reason
 	);

 	services.admin.send_message(build_report(report)).await.ok();

-	Ok(report_user::v3::Response {})
+	Ok(report_user::v3::Response::new())
 }

 /// in the following order:
@@ -188,7 +184,7 @@ async fn is_event_report_valid(
 	event_id: &EventId,
 	room_id: &RoomId,
 	sender_user: &UserId,
-	reason: Option<&String>,
+	reason: &str,
 	pdu: &PduEvent,
 ) -> Result<()> {
 	debug_info!(
@@ -200,10 +196,10 @@ async fn is_event_report_valid(
 		return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
 	}

-	if reason.as_ref().is_some_and(|s| s.len() > 750) {
-		return Err!(Request(
-			InvalidParam("Reason too long, should be 750 characters or fewer",)
-		));
+	if reason.len() > MAX_REASON_LENGTH {
+		return Err!(Request(InvalidParam(
+			"Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer",
+		)));
 	}

 	if !services
@@ -232,9 +228,7 @@ fn build_report(report: Report) -> RoomMessageEventContent {
 	if report.event_id.is_some() {
 		let _ = writeln!(text, "- Reported Event ID: `{}`", report.event_id.unwrap());
 	}
-	if let Some(reason) = report.reason {
-		let _ = writeln!(text, "- Report Reason: {reason}");
-	}
+	let _ = writeln!(text, "- Report Reason: {}", report.reason);

 	RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention())
 }
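Aside: the reason-length rewrite above replaces a hard-coded 750-character cap with the shared MAX_REASON_LENGTH constant, compared against str::len(), which counts UTF-8 bytes rather than characters (hence the reworded "bytes or fewer" message). A minimal standalone sketch of the same bounds check, with illustrative names rather than the real conduwuit handler types:

	const MAX_REASON_LENGTH: usize = 2000;

	// Hypothetical free function mirroring the check the handlers perform.
	fn validate_reason(reason: &str) -> Result<(), String> {
		// str::len() measures UTF-8 bytes, not characters.
		if reason.len() > MAX_REASON_LENGTH {
			return Err(format!("Reason too long, should be {MAX_REASON_LENGTH} bytes or fewer"));
		}
		Ok(())
	}

	fn main() {
		assert!(validate_reason("spam").is_ok());
		assert!(validate_reason(&"x".repeat(MAX_REASON_LENGTH + 1)).is_err());
	}

The same check appears three times in this file (room, event, and user reports), which is what motivates hoisting the limit into a single constant.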
+4 -5
@@ -26,13 +26,12 @@ pub(crate) async fn get_room_aliases_route(
 		return Err!(Request(Forbidden("You don't have permission to view this room.",)));
 	}

-	Ok(aliases::v3::Response {
-		aliases: services
+	let aliases = services
 		.rooms
 		.alias
 		.local_aliases_for_room(&body.room_id)
-		.map(ToOwned::to_owned)
 		.collect()
-		.await,
-	})
+		.await;
+
+	Ok(aliases::v3::Response::new(aliases))
 }
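The struct-literal-to-constructor change here (aliases::v3::Response { .. } becoming aliases::v3::Response::new(..)) is the same pattern as the report responses above, and plausibly follows from the response types being marked #[non_exhaustive] in upstream ruma, which forbids struct-literal construction outside the defining crate. A toy sketch with stand-in types, not ruma's actual definitions:

	// Stand-in for a #[non_exhaustive] response type from another crate.
	pub struct Response {
		pub aliases: Vec<String>,
	}

	impl Response {
		// With #[non_exhaustive] on the real type, callers must go through
		// a constructor like this instead of a struct literal.
		pub fn new(aliases: Vec<String>) -> Self { Self { aliases } }
	}

	fn main() {
		let aliases = vec!["#lounge:example.org".to_owned()];
		assert_eq!(Response::new(aliases).aliases.len(), 1);
	}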
+84 -120
@@ -2,18 +2,19 @@
 use axum::extract::State;
 use conduwuit::{
-	Err, Result, RoomVersion, debug, debug_info, debug_warn, err, info,
-	matrix::{StateKey, pdu::PduBuilder},
+	Err, Result, debug, debug_info, err, info,
+	matrix::{StateKey, pdu::PartialPdu},
 	trace, warn,
 };
 use conduwuit_service::{Services, appservice::RegistrationInfo};
 use futures::FutureExt;
 use ruma::{
-	CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,
+	CanonicalJsonObject, CanonicalJsonValue, Int, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId,
+	OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomVersionId, UserId,
 	api::client::room::{self, create_room},
+	assign,
 	events::{
 		TimelineEventType,
-		invite_permission_config::FilterLevel,
 		room::{
 			canonical_alias::RoomCanonicalAliasEventContent,
 			create::RoomCreateEventContent,
@@ -27,8 +28,10 @@
 		},
 	},
 	int,
+	room_version_rules::{AuthorizationRules, RoomIdFormatVersion},
 	serde::{JsonObject, Raw},
 };
+use ruminuwuity::invite_permission_config::FilterLevel;
 use serde_json::{json, value::to_raw_value};

 use crate::{Ruma, client::invite_helper};
@@ -81,15 +84,23 @@ pub(crate) async fn create_room_route(
 		},
 		| None => services.server.config.default_room_version.clone(),
 	};
-	let room_features = RoomVersion::new(&room_version)?;
-
-	let room_id: Option<OwnedRoomId> = if !room_features.room_ids_as_hashes {
-		match &body.room_id {
-			| Some(custom_room_id) => Some(custom_room_id_check(&services, custom_room_id)?),
-			| None => Some(RoomId::new(services.globals.server_name())),
-		}
-	} else {
-		None
+	let room_version_rules = room_version.rules().unwrap();
+
+	let room_id: Option<OwnedRoomId> = match room_version_rules.room_id_format {
+		| RoomIdFormatVersion::V1 => {
+			// Check for custom room ID field
+			if let Some(CanonicalJsonValue::String(room_id)) =
+				body.json_body.as_ref().unwrap().get("room_id")
+			{
+				Some(
+					RoomId::parse(room_id)
+						.map_err(|_| err!(Request(BadJson("Malformed custom room ID"))))?,
+				)
+			} else {
+				Some(RoomId::new_v1(services.globals.server_name()))
+			}
+		},
+		| _ => None,
 	};

 	// check if room ID doesn't already exist instead of erroring on auth check
@@ -167,7 +178,7 @@ pub(crate) async fn create_room_route(
 	use RoomVersionId::*;

 	let mut content = content
-		.deserialize_as::<CanonicalJsonObject>()
+		.deserialize_as_unchecked::<CanonicalJsonObject>()
 		.map_err(|e| {
 			err!(Request(BadJson(error!(
 				"Failed to deserialise content as canonical JSON: {e}"
@@ -201,8 +212,7 @@
 	let content = match room_version {
 		| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
 			RoomCreateEventContent::new_v1(sender_user.to_owned()),
-		| V11 => RoomCreateEventContent::new_v11(),
-		| _ => RoomCreateEventContent::new_v12(),
+		| _ => RoomCreateEventContent::new_v11(),
 	};
 	let mut content =
 		serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())?;
@@ -218,27 +228,40 @@
 				.short
 				.get_or_create_shortroomid(&room_id)
 				.await;
-			services.rooms.state.mutex.lock(&room_id).await
+			services.rooms.state.mutex.lock(room_id.as_str()).await
 		},
 		| None => {
-			let temp_room_id = RoomId::new(services.globals.server_name());
+			let temp_room_id = RoomId::new_v1(services.globals.server_name());
 			trace!("Locking temporary room state mutex for {temp_room_id}");
-			services.rooms.state.mutex.lock(&temp_room_id).await
+			services.rooms.state.mutex.lock(temp_room_id.as_str()).await
 		},
 	};

 	// 1. The room create event
 	debug!("Creating room create event for {sender_user} in room {room_id:?}");
 	let tmp_id = room_id.as_deref();
+
+	// Allow requesters to override the `origin_server_ts` to customize room ids
+	// from v12 onwards
+	let custom_origin_server_ts = body
+		.json_body
+		.as_ref()
+		.unwrap()
+		.get("origin_server_ts")
+		.and_then(CanonicalJsonValue::as_integer)
+		.map(Into::into)
+		.and_then(|value: i64| value.try_into().ok())
+		.map(MilliSecondsSinceUnixEpoch);
+
 	let create_event_id = services
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder {
+			PartialPdu {
 				event_type: TimelineEventType::RoomCreate,
 				content: to_raw_value(&create_content)?,
 				state_key: Some(StateKey::new()),
-				timestamp: body.origin_server_ts,
+				timestamp: custom_origin_server_ts,
 				..Default::default()
 			},
 			sender_user,
@@ -253,34 +276,27 @@ pub(crate) async fn create_room_route(
 		| None => {
 			let as_room_id = create_event_id.as_str().replace('$', "!");
 			trace!("Creating room with v12 room ID {as_room_id}");
-			RoomId::parse(&as_room_id)?.to_owned()
+			RoomId::parse(&as_room_id)?.clone()
 		},
 	};
 	drop(state_lock);

-	if let Some(expected_room_id) = body.room_id.as_ref() {
-		if expected_room_id.as_str() != room_id.as_str() {
-			return Err!(Request(InvalidParam(
-				"Custom room ID {expected_room_id} does not match the generated room ID \
-				 {room_id}.",
-			)));
-		}
-	}
-
 	debug!("Room created with ID {room_id}");
-	let state_lock = services.rooms.state.mutex.lock(&room_id).await;
+	let state_lock = services.rooms.state.mutex.lock(room_id.as_str()).await;

 	// 2. Let the room creator join
+	let mut join_event = RoomMemberEventContent::new(MembershipState::Join);
+	join_event.displayname = services.users.displayname(sender_user).await.ok();
+	join_event.avatar_url = services.users.avatar_url(sender_user).await.ok();
+	join_event.blurhash = services.users.blurhash(sender_user).await.ok();
+	join_event.is_direct = Some(body.is_direct);
+
 	debug_info!("Joining {sender_user} to room {room_id}");
 	services
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder::state(sender_user.to_string(), &RoomMemberEventContent {
-				displayname: services.users.displayname(sender_user).await.ok(),
-				avatar_url: services.users.avatar_url(sender_user).await.ok(),
-				blurhash: services.users.blurhash(sender_user).await.ok(),
-				is_direct: Some(body.is_direct),
-				..RoomMemberEventContent::new(MembershipState::Join)
-			}),
+			PartialPdu::state(sender_user.to_string(), &join_event),
 			sender_user,
 			Some(&room_id),
 			&state_lock,
@@ -306,7 +322,10 @@ pub(crate) async fn create_room_route(
 	let mut creators: Vec<OwnedUserId> = vec![sender_user.to_owned()];

 	// Do we care about additional_creators?
-	if room_features.explicitly_privilege_room_creators {
+	if room_version_rules
+		.authorization
+		.explicitly_privilege_room_creators
+	{
 		// Have they been specified?
 		if let Some(additional_creators) = create_content.get("additional_creators") {
 			// Are they a real array?
@@ -316,9 +335,9 @@
 					// Are they a string?
 					if let Some(creator) = creator.as_str() {
 						// Do they parse into a real user ID?
-						if let Ok(creator) = OwnedUserId::parse(creator) {
+						if let Ok(creator) = UserId::parse(creator) {
 							// Add them to the power levels and creators
-							creators.push(creator.clone());
+							creators.push(creator);
 						}
 					}
 				}
@@ -331,17 +350,20 @@
 	}

 	let power_levels_content = default_power_levels_content(
-		body.power_level_content_override.as_ref(),
+		body.power_level_content_override
+			.as_ref()
+			.map(Raw::cast_ref),
 		&body.visibility,
 		power_levels_to_grant,
 		creators,
+		&room_version_rules.authorization,
 	)?;

 	services
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder {
+			PartialPdu {
 				event_type: TimelineEventType::RoomPowerLevels,
 				content: to_raw_value(&power_levels_content)?,
 				state_key: Some(StateKey::new()),
@@ -360,10 +382,13 @@ pub(crate) async fn create_room_route(
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent {
-				alias: Some(room_alias_id.to_owned()),
-				alt_aliases: vec![],
-			}),
+			PartialPdu::state(
+				String::new(),
+				&assign!(RoomCanonicalAliasEventContent::new(), {
+					alias: Some(room_alias_id.to_owned()),
+					alt_aliases: vec![],
+				}),
+			),
 			sender_user,
 			Some(&room_id),
 			&state_lock,
@@ -379,7 +404,7 @@
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder::state(
+			PartialPdu::state(
 				String::new(),
 				&RoomJoinRulesEventContent::new(match preset {
 					| RoomPreset::PublicChat => JoinRule::Public,
@@ -399,7 +424,7 @@
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder::state(
+			PartialPdu::state(
 				String::new(),
 				&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
 			),
@@ -415,7 +440,7 @@
 		.rooms
 		.timeline
 		.build_and_append_pdu(
-			PduBuilder::state(
+			PartialPdu::state(
 				String::new(),
 				&RoomGuestAccessEventContent::new(match preset {
 					| RoomPreset::PublicChat => GuestAccess::Forbidden,
@@ -431,26 +456,19 @@
 	// 6. Events listed in initial_state
 	for event in &body.initial_state {
-		let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
+		let mut partial_pdu = event
+			.deserialize_as_unchecked::<PartialPdu>()
+			.map_err(|e| {
 			err!(Request(InvalidParam(warn!("Invalid initial state event: {e:?}"))))
 		})?;

 		debug_info!("Room creation initial state event: {event:?}");

-		// client/appservice workaround: if a user sends an initial_state event with a
-		// state event in there with the content of literally `{}` (not null or empty
-		// string), let's just skip it over and warn.
-		if pdu_builder.content.get().eq("{}") {
-			debug_warn!("skipping empty initial state event with content of `{{}}`: {event:?}");
-			debug_warn!("content: {}", pdu_builder.content.get());
-			continue;
-		}
-
 		// Implicit state key defaults to ""
-		pdu_builder.state_key.get_or_insert_with(StateKey::new);
+		partial_pdu.state_key.get_or_insert_with(StateKey::new);

 		// Silently skip encryption events if they are not allowed
-		if pdu_builder.event_type == TimelineEventType::RoomEncryption
+		if partial_pdu.event_type == TimelineEventType::RoomEncryption
 			&& !services.config.allow_encryption
 		{
 			continue;
@@ -459,7 +477,7 @@ pub(crate) async fn create_room_route(
 		services
 			.rooms
 			.timeline
-			.build_and_append_pdu(pdu_builder, sender_user, Some(&room_id), &state_lock)
+			.build_and_append_pdu(partial_pdu, sender_user, Some(&room_id), &state_lock)
 			.boxed()
 			.await?;
 	}
@@ -470,7 +488,7 @@
 			.rooms
 			.timeline
 			.build_and_append_pdu(
-				PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
+				PartialPdu::state(String::new(), &RoomNameEventContent::new(name.clone())),
 				sender_user,
 				Some(&room_id),
 				&state_lock,
@@ -484,7 +502,7 @@
 			.rooms
 			.timeline
 			.build_and_append_pdu(
-				PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }),
+				PartialPdu::state(String::new(), &RoomTopicEventContent::new(topic.clone())),
 				sender_user,
 				Some(&room_id),
 				&state_lock,
@@ -539,10 +557,13 @@ fn default_power_levels_content(
 	visibility: &room::Visibility,
 	users: BTreeMap<OwnedUserId, Int>,
 	creators: Vec<OwnedUserId>,
+	authorization_rules: &AuthorizationRules,
 ) -> Result<serde_json::Value> {
 	let mut power_levels_content =
-		serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() })
-			.expect("event is valid, we just created it");
+		serde_json::to_value(assign!(RoomPowerLevelsEventContent::new(authorization_rules), {
+			users
+		}))
+		.unwrap();

 	// secure proper defaults of sensitive/dangerous permissions that moderators
 	// (power level 50) should not have easy access to
@@ -632,7 +653,7 @@ async fn room_alias_check(
 	}

 	let server_name = services.globals.server_name();
-	let full_room_alias = OwnedRoomAliasId::parse(format!("#{room_alias_name}:{server_name}"))
+	let full_room_alias = RoomAliasId::parse(format!("#{room_alias_name}:{server_name}"))
 		.map_err(|e| {
 			err!(Request(InvalidParam(debug_error!(
 				?e,
@@ -667,60 +688,3 @@ async fn room_alias_check(
 	Ok(full_room_alias)
 }
-
-/// if a room is being created with a custom room ID, run our checks against it
-fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<OwnedRoomId> {
-	// apply forbidden room alias checks to custom room IDs too
-	if services
-		.globals
-		.forbidden_alias_names()
-		.is_match(custom_room_id)
-	{
-		return Err!(Request(Unknown("Custom room ID is forbidden.")));
-	}
-
-	if custom_room_id.contains(':') {
-		return Err!(Request(InvalidParam(
-			"Custom room ID contained `:` which is not allowed. Please note that this expects a \
-			 localpart, not the full room ID.",
-		)));
-	} else if custom_room_id.contains(char::is_whitespace) {
-		return Err!(Request(InvalidParam(
-			"Custom room ID contained spaces which is not valid."
-		)));
-	}
-
-	let server_name = services.globals.server_name();
-	let mut room_id = custom_room_id.to_owned();
-	if custom_room_id.contains(':') {
-		if !custom_room_id.starts_with('!') {
-			return Err!(Request(InvalidParam(
-				"Custom room ID contains an unexpected `:` which is not allowed.",
-			)));
-		}
-	} else if custom_room_id.starts_with('!') {
-		return Err!(Request(InvalidParam(
-			"Room ID is prefixed with !, but is not fully qualified. You likely did not want \
-			 this.",
-		)));
-	} else {
-		room_id = format!("!{custom_room_id}:{server_name}");
-	}
-
-	OwnedRoomId::parse(room_id)
-		.map_err(Into::into)
-		.and_then(|full_room_id| {
-			if full_room_id
-				.server_name()
-				.expect("failed to extract server name from room ID")
-				!= server_name
-			{
-				Err!(Request(InvalidParam("Custom room ID must be on this server.",)))
-			} else {
-				Ok(full_room_id)
-			}
-		})
-		.inspect(|full_room_id| {
-			debug_info!(%full_room_id, "Full custom room ID");
-		})
-		.inspect_err(|e| warn!(?e, %custom_room_id, "Failed to create room with custom room ID",))
-}
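A note on the create-room rewrite as a whole: for room versions whose room_id_format is no longer V1, the room ID is not chosen up front. The m.room.create event is built first, and the room ID is derived from its event ID by swapping the `$` sigil for `!` (the replace('$', "!") above). That is why custom_room_id_check and localpart-style custom room IDs are dropped, and why the handler instead lets callers influence the resulting ID by overriding origin_server_ts. A self-contained sketch of the derivation, using plain strings rather than the real EventId/RoomId types:

	// For v12-style rooms, the room ID is the create event's ID with the
	// `$` sigil replaced by `!`; base64url event-ID hashes contain no `$`,
	// so a plain replace touches only the sigil.
	fn room_id_from_create_event(create_event_id: &str) -> String {
		create_event_id.replace('$', "!")
	}

	fn main() {
		let create_event_id = "$31hneApxJ_1o-63DmFrpeqnkFfWppnzWso1JvH3ogLM";
		assert_eq!(
			room_id_from_create_event(create_event_id),
			"!31hneApxJ_1o-63DmFrpeqnkFfWppnzWso1JvH3ogLM"
		);
	}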

Some files were not shown because too many files have changed in this diff.