Compare commits

..

13 Commits

Author SHA1 Message Date
timedout d3a40c40ba feat: More logging and fix some handling 2026-05-10 05:22:22 +01:00
timedout 7559b31f99 feat: Better prev event fetching 2026-05-10 02:20:05 +01:00
timedout 0cd8b8fc35 feat: Add backfill_missing_events helper 2026-05-09 22:32:52 +01:00
timedout b884fbd240 style: Reformat 2026-05-09 20:06:53 +01:00
timedout a03490c81c fix: Store PDUs as outliers even when rejected
This prevents future network lookups if we've already rejected an event and see a reference to it again
2026-05-09 19:54:48 +01:00
timedout 5a4b8309a7 chore: Update newsfrag 2026-05-09 19:29:31 +01:00
timedout d576620953 style: Rename unmark_pdu to clear_pdu_markers 2026-05-09 19:23:46 +01:00
timedout 36cf5b053b feat: Add !admin debug show-auth-chain
Because why not am I right lads
2026-05-09 19:17:11 +01:00
timedout d95bf66e3d feat: Make !admin debug get-pdu more informative 2026-05-09 18:17:00 +01:00
timedout ab277409bc fix: Don't hard fail on events which depend on soft-failed events 2026-05-09 18:01:04 +01:00
timedout ec76147784 fix: Mark events as rejected in more places, correct soft-fail extremity behaviour 2026-05-09 17:45:02 +01:00
timedout 3f3ca53fdd chore: Add news fragment for 1747
Co-Authored-By: star <star@nexy7574.co.uk>
2026-05-08 19:57:46 +01:00
timedout 70a768a711 feat: Implement event rejection
Co-Authored-By: star <star@nexy7574.co.uk>
2026-05-08 19:49:51 +01:00
55 changed files with 2039 additions and 784 deletions
+1 -1
View File
@@ -24,7 +24,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.46.1
rev: v1.46.0
hooks:
- id: typos
- id: typos
+1 -1
View File
@@ -1 +1 @@
Contributors are expected to follow the [Continuwuity Community Guidelines](https://continuwuity.org/community/guidelines).
Contributors are expected to follow the [Continuwuity Community Guidelines](continuwuity.org/community/guidelines).
Generated
+424 -11
View File
@@ -39,6 +39,24 @@ dependencies = [
"memchr",
]
[[package]]
name = "aligned"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee4508988c62edf04abd8d92897fca0c2995d907ce1dfeaf369dac3716a40685"
dependencies = [
"as-slice",
]
[[package]]
name = "aligned-vec"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b"
dependencies = [
"equator",
]
[[package]]
name = "alloc-no-stdlib"
version = "2.0.4"
@@ -101,6 +119,12 @@ version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]]
name = "arbitrary"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1"
[[package]]
name = "arc-swap"
version = "1.9.1"
@@ -110,6 +134,17 @@ dependencies = [
"rustversion",
]
[[package]]
name = "arg_enum_proc_macro"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "argon2"
version = "0.5.3"
@@ -137,6 +172,15 @@ dependencies = [
"serde",
]
[[package]]
name = "as-slice"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "516b6b4f0e40d50dcda9365d53964ec74560ad4284da2e7fc97122cd83174516"
dependencies = [
"stable_deref_trait",
]
[[package]]
name = "as_variant"
version = "1.3.0"
@@ -257,6 +301,49 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "av-scenechange"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f321d77c20e19b92c39e7471cf986812cbb46659d2af674adc4331ef3f18394"
dependencies = [
"aligned",
"anyhow",
"arg_enum_proc_macro",
"arrayvec",
"log",
"num-rational",
"num-traits",
"pastey",
"rayon",
"thiserror",
"v_frame",
"y4m",
]
[[package]]
name = "av1-grain"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cfddb07216410377231960af4fcab838eaa12e013417781b78bd95ee22077f8"
dependencies = [
"anyhow",
"arrayvec",
"log",
"nom 8.0.0",
"num-rational",
"v_frame",
]
[[package]]
name = "avif-serialize"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "375082f007bd67184fb9c0374614b29f9aaa604ec301635f72338bb65386a53d"
dependencies = [
"arrayvec",
]
[[package]]
name = "aws-lc-rs"
version = "1.16.3"
@@ -465,12 +552,27 @@ dependencies = [
"syn",
]
[[package]]
name = "bit_field"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e4b40c7323adcfc0a41c4b88143ed58346ff65a288fc144329c5c45e05d70c6"
[[package]]
name = "bitflags"
version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3"
[[package]]
name = "bitstream-io"
version = "4.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eff00be299a18769011411c9def0d827e8f2d7bf0c3dbf53633147a8867fd1f"
dependencies = [
"no_std_io2",
]
[[package]]
name = "blake2"
version = "0.10.6"
@@ -507,6 +609,15 @@ dependencies = [
"objc2",
]
[[package]]
name = "blurhash"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc"
dependencies = [
"image",
]
[[package]]
name = "brotli"
version = "8.0.2"
@@ -1056,6 +1167,7 @@ dependencies = [
"assign",
"async-trait",
"base64 0.22.1",
"blurhash",
"bytes",
"conduwuit_core",
"conduwuit_database",
@@ -1381,6 +1493,12 @@ dependencies = [
"winapi",
]
[[package]]
name = "crunchy"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
[[package]]
name = "crypto-common"
version = "0.1.7"
@@ -1696,6 +1814,26 @@ dependencies = [
"syn",
]
[[package]]
name = "equator"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc"
dependencies = [
"equator-macro",
]
[[package]]
name = "equator-macro"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -1731,12 +1869,33 @@ dependencies = [
"pin-project-lite",
]
[[package]]
name = "exr"
version = "1.74.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4300e043a56aa2cb633c01af81ca8f699a321879a7854d3896a0ba89056363be"
dependencies = [
"bit_field",
"half",
"lebe",
"miniz_oxide",
"rayon-core",
"smallvec",
"zune-inflate",
]
[[package]]
name = "fastrand"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6"
[[package]]
name = "fax"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "caf1079563223d5d59d83c85886a56e586cfd5c1a26292e971a0fa266531ac5a"
[[package]]
name = "fdeflate"
version = "0.3.7"
@@ -2086,6 +2245,17 @@ dependencies = [
"tokio-util",
]
[[package]]
name = "half"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b"
dependencies = [
"cfg-if",
"crunchy",
"zerocopy",
]
[[package]]
name = "hardened_malloc-rs"
version = "0.1.2+12"
@@ -2521,11 +2691,17 @@ dependencies = [
"bytemuck",
"byteorder-lite",
"color_quant",
"exr",
"gif",
"image-webp",
"moxcms",
"num-traits",
"png",
"qoi",
"ravif",
"rayon",
"rgb",
"tiff",
"zune-core",
"zune-jpeg",
]
@@ -2540,6 +2716,12 @@ dependencies = [
"quick-error",
]
[[package]]
name = "imgref"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40fac9d56ed6437b198fddba683305e8e2d651aa42647f00f5ae542e7f5c94a2"
[[package]]
name = "indexmap"
version = "2.14.0"
@@ -2558,6 +2740,17 @@ version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb"
[[package]]
name = "interpolate_name"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "ipaddress"
version = "0.1.3"
@@ -2759,6 +2952,12 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "lebe"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a79a3332a6609480d7d0c9eab957bca6b455b91bb84e66d19f5ff66294b85b8"
[[package]]
name = "lettre"
version = "0.11.21"
@@ -2795,6 +2994,16 @@ version = "0.2.186"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66"
[[package]]
name = "libfuzzer-sys"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f12a681b7dd8ce12bff52488013ba614b869148d54dd79836ab85aafdd53f08d"
dependencies = [
"arbitrary",
"cc",
]
[[package]]
name = "libloading"
version = "0.8.9"
@@ -2887,6 +3096,15 @@ dependencies = [
"futures-sink",
]
[[package]]
name = "loop9"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062"
dependencies = [
"imgref",
]
[[package]]
name = "lru-cache"
version = "0.1.2"
@@ -2965,6 +3183,16 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
[[package]]
name = "maybe-rayon"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519"
dependencies = [
"cfg-if",
"rayon",
]
[[package]]
name = "memchr"
version = "2.8.0"
@@ -3113,6 +3341,15 @@ dependencies = [
"libc",
]
[[package]]
name = "no_std_io2"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b51ed7824b6e07d354605f4abb3d9d300350701299da96642ee084f5ce631550"
dependencies = [
"memchr",
]
[[package]]
name = "nohash-hasher"
version = "0.2.0"
@@ -3144,6 +3381,12 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
[[package]]
name = "noop_proc_macro"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8"
[[package]]
name = "nu-ansi-term"
version = "0.50.3"
@@ -3192,6 +3435,17 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
[[package]]
name = "num-derive"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "num-integer"
version = "0.1.46"
@@ -3566,6 +3820,12 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pastey"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35fb2e5f958ec131621fdd531e9fc186ed768cbe395337403ae56c17a74c68ec"
[[package]]
name = "pear"
version = "0.2.9"
@@ -3797,6 +4057,25 @@ dependencies = [
"yansi",
]
[[package]]
name = "profiling"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3eb8486b569e12e2c32ad3e204dbaba5e4b5b216e9367044f25f1dba42341773"
dependencies = [
"profiling-procmacros",
]
[[package]]
name = "profiling-procmacros"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52717f9a02b6965224f95ca2a81e2e0c5c43baacd28ca057577988930b6c3d5b"
dependencies = [
"quote",
"syn",
]
[[package]]
name = "prost"
version = "0.14.3"
@@ -3853,6 +4132,15 @@ version = "0.1.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0c5ccf5294c6ccd63a74f1565028353830a9c2f5eb0c682c355c471726a6e3f"
[[package]]
name = "qoi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001"
dependencies = [
"bytemuck",
]
[[package]]
name = "quick-error"
version = "2.0.1"
@@ -4007,6 +4295,76 @@ version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63b8176103e19a2643978565ca18b50549f6101881c443590420e4dc998a3c69"
[[package]]
name = "rav1e"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43b6dd56e85d9483277cde964fd1bdb0428de4fec5ebba7540995639a21cb32b"
dependencies = [
"aligned-vec",
"arbitrary",
"arg_enum_proc_macro",
"arrayvec",
"av-scenechange",
"av1-grain",
"bitstream-io",
"built",
"cfg-if",
"interpolate_name",
"itertools 0.14.0",
"libc",
"libfuzzer-sys",
"log",
"maybe-rayon",
"new_debug_unreachable",
"noop_proc_macro",
"num-derive",
"num-traits",
"paste",
"profiling",
"rand 0.9.4",
"rand_chacha",
"simd_helpers",
"thiserror",
"v_frame",
"wasm-bindgen",
]
[[package]]
name = "ravif"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e52310197d971b0f5be7fe6b57530dcd27beb35c1b013f29d66c1ad73fbbcc45"
dependencies = [
"avif-serialize",
"imgref",
"loop9",
"quick-error",
"rav1e",
"rayon",
"rgb",
]
[[package]]
name = "rayon"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "recaptcha-verify"
version = "0.2.0"
@@ -4147,6 +4505,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7"
[[package]]
name = "rgb"
version = "0.8.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47b34b781b31e5d73e9fbc8689c70551fd1ade9a19e3e28cfec8580a79290cc4"
[[package]]
name = "ring"
version = "0.17.14"
@@ -4164,7 +4528,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.15.1"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"assign",
"js_int",
@@ -4183,7 +4547,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.15.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
@@ -4195,7 +4559,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.23.1"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"assign",
@@ -4217,7 +4581,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.18.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"base64 0.22.1",
@@ -4250,7 +4614,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.33.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"indexmap",
@@ -4271,7 +4635,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.14.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"bytes",
"headers",
@@ -4294,7 +4658,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.12.1"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"thiserror",
@@ -4303,7 +4667,7 @@ dependencies = [
[[package]]
name = "ruma-macros"
version = "0.18.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"as_variant",
"cfg-if",
@@ -4319,7 +4683,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.14.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
@@ -4331,7 +4695,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.20.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",
@@ -4347,7 +4711,7 @@ dependencies = [
[[package]]
name = "ruma-state-res"
version = "0.16.0"
source = "git+https://github.com/ruma/ruma.git?rev=9c9dccc93f054bbd28f23f630223fffa6289ecbc#9c9dccc93f054bbd28f23f630223fffa6289ecbc"
source = "git+https://github.com/ruma/ruma.git?rev=5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c#5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
dependencies = [
"js_int",
"ruma-common",
@@ -5007,6 +5371,15 @@ dependencies = [
"simdutf8",
]
[[package]]
name = "simd_helpers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6"
dependencies = [
"quote",
]
[[package]]
name = "simdutf8"
version = "0.1.5"
@@ -5225,6 +5598,20 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "tiff"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b63feaf3343d35b6ca4d50483f94843803b0f51634937cc2ec519fc32232bc52"
dependencies = [
"fax",
"flate2",
"half",
"quick-error",
"weezl",
"zune-jpeg",
]
[[package]]
name = "tikv-jemalloc-ctl"
version = "0.6.1"
@@ -5825,6 +6212,17 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "v_frame"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "666b7727c8875d6ab5db9533418d7c764233ac9c0cff1d469aec8fa127597be2"
dependencies = [
"aligned-vec",
"num-traits",
"wasm-bindgen",
]
[[package]]
name = "validator"
version = "0.20.0"
@@ -6439,6 +6837,12 @@ dependencies = [
"conduwuit_admin",
]
[[package]]
name = "y4m"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5a4b21e1a62b67a2970e6831bc091d7b87e119e7f9791aef9702e3bef04448"
[[package]]
name = "yansi"
version = "1.0.1"
@@ -6588,6 +6992,15 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb8a0807f7c01457d0379ba880ba6322660448ddebc890ce29bb64da71fb40f9"
[[package]]
name = "zune-inflate"
version = "0.2.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02"
dependencies = [
"simd-adler32",
]
[[package]]
name = "zune-jpeg"
version = "0.5.15"
+11 -2
View File
@@ -180,7 +180,7 @@ version = "0.5.3"
features = ["alloc", "rand"]
default-features = false
# Used to generate thumbnails for images
# Used to generate thumbnails for images & blurhashes
[workspace.dependencies.image]
version = "0.25.5"
default-features = false
@@ -191,6 +191,14 @@ features = [
"webp",
]
[workspace.dependencies.blurhash]
version = "0.2.3"
default-features = false
features = [
"fast-linear-to-srgb",
"image",
]
# logging
[workspace.dependencies.log]
version = "0.4.27"
@@ -344,7 +352,7 @@ version = "1.1.1"
[workspace.dependencies.ruma]
# version = "0.14.1"
git = "https://github.com/ruma/ruma.git"
rev = "9c9dccc93f054bbd28f23f630223fffa6289ecbc"
rev = "5742fec0021b85fedbf5cd1f59c50a00bb5b9f7c"
features = [
"appservice-api-c",
"client-api",
@@ -356,6 +364,7 @@ features = [
"ring-compat",
"compat-upload-signatures",
"compat-optional-txn-pdus",
"unstable-msc2448",
"unstable-msc2666",
"unstable-msc2867",
"unstable-msc2870",
-1
View File
@@ -1 +0,0 @@
The deprecated `well_known.rtc_focus_server_urls` config option has been removed. MatrixRTC foci should be configured using the `matrix_rtc.foci` config option.
-1
View File
@@ -1 +0,0 @@
Support for server-side blurhashing (part of MSC2448) has been removed.
-1
View File
@@ -1 +0,0 @@
fix `!admin query account-data account-data-get` not returning the content
+9
View File
@@ -0,0 +1,9 @@
Implemented event rejection, which should resolve and prevent future netsplits of the kinds observed
within some Continuwuity rooms.
Also resolved several bugs related to both soft-failing events, and event backfilling, which should
improve state resolution stability.
The `!admin debug get-pdu` command was updated to disambiguate event acceptance status, and
`!admin debug show-auth-chain` was added to visually display event auth chains, which may assist
developers in debugging strangely complex events.
Contributed by @nex.
-1
View File
@@ -1 +0,0 @@
Fixed an issue where Continuwuity would only advertise support for the unstable endpoint for Mutual Rooms (MSC2666), despite only supporting the stable endpoint. Contributed by @Henry-Hiles (QuadRadical)
+28
View File
@@ -1874,6 +1874,34 @@
#
#support_pgp_key =
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
#
# A list of MatrixRTC foci URLs which will be served as part of the
# MSC4143 client endpoint at /.well-known/matrix/client.
#
# This option is deprecated and will be removed in a future release.
# Please migrate to the new `[global.matrix_rtc]` config section.
#
#rtc_focus_server_urls = []
[global.blurhashing]
# blurhashing x component, 4 is recommended by https://blurha.sh/
#
#components_x = 4
# blurhashing y component, 3 is recommended by https://blurha.sh/
#
#components_y = 3
# Max raw size that the server will blurhash, this is the size of the
# image after converting it to raw data, it should be higher than the
# upload limit but not too high. The higher it is the higher the
# potential load will be for clients requesting blurhashes. The default
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.matrix_rtc]
# A list of MatrixRTC foci (transports) which will be served via the
+1 -1
View File
@@ -146,7 +146,7 @@ cargo clippy \
--locked \
--profile test \
--no-default-features \
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression \
--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \
--color=always \
-- \
-D warnings
+94 -94
View File
@@ -125,13 +125,13 @@
}
},
"node_modules/@rsbuild/core": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.5.tgz",
"integrity": "sha512-KajO50hbXb32S8MsyDh2f+xKcVeRy9Gfzdcy0JjpMLj22djHugly6jrGo7jH7ls9X6/TDcyCTncSuNK4+D2lTw==",
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@rsbuild/core/-/core-2.0.3.tgz",
"integrity": "sha512-2myp7jUgGen50saxW8OJD/eMVKp7HnuBN5MUzwRb6mDbRZZVpoorfI4LQqiGSBNjGLB6jltvx/R2yHmcmnchwg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/core": "~2.0.2",
"@rspack/core": "~2.0.1",
"@swc/helpers": "^0.5.21"
},
"bin": {
@@ -169,28 +169,28 @@
}
},
"node_modules/@rspack/binding": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.2.tgz",
"integrity": "sha512-0kZPplW9GWx8mfC6DfsaRY3QBIYPuUs42JfmSM6aSb8tMHZAXQeLeMB8M+h8i4SeI+aFtCgO6UuYGtyWf7+L+A==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding/-/binding-2.0.1.tgz",
"integrity": "sha512-ynV1gw4KqFtQ0P+ZZh76SUj49wBb2FuHW3zSmHverHWuxBhzvrZS6/dZ+fCFQG8bTTPtrPz0RQUTN3uEDbPVBQ==",
"dev": true,
"license": "MIT",
"optionalDependencies": {
"@rspack/binding-darwin-arm64": "2.0.2",
"@rspack/binding-darwin-x64": "2.0.2",
"@rspack/binding-linux-arm64-gnu": "2.0.2",
"@rspack/binding-linux-arm64-musl": "2.0.2",
"@rspack/binding-linux-x64-gnu": "2.0.2",
"@rspack/binding-linux-x64-musl": "2.0.2",
"@rspack/binding-wasm32-wasi": "2.0.2",
"@rspack/binding-win32-arm64-msvc": "2.0.2",
"@rspack/binding-win32-ia32-msvc": "2.0.2",
"@rspack/binding-win32-x64-msvc": "2.0.2"
"@rspack/binding-darwin-arm64": "2.0.1",
"@rspack/binding-darwin-x64": "2.0.1",
"@rspack/binding-linux-arm64-gnu": "2.0.1",
"@rspack/binding-linux-arm64-musl": "2.0.1",
"@rspack/binding-linux-x64-gnu": "2.0.1",
"@rspack/binding-linux-x64-musl": "2.0.1",
"@rspack/binding-wasm32-wasi": "2.0.1",
"@rspack/binding-win32-arm64-msvc": "2.0.1",
"@rspack/binding-win32-ia32-msvc": "2.0.1",
"@rspack/binding-win32-x64-msvc": "2.0.1"
}
},
"node_modules/@rspack/binding-darwin-arm64": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.2.tgz",
"integrity": "sha512-0o7lbgBBsDlICWdjIH0q3e0BsSco4GRiImHWVfZSVEG+q2+ykZJvSvYCVhPM1Co375Z0S3VMPa/8SjcY1FHwlw==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-arm64/-/binding-darwin-arm64-2.0.1.tgz",
"integrity": "sha512-CGFO5zmajD1Itch1lxAI7+gvKiagzyqXopHv/jHG9Su2WWQ2/Nhn2/rkSpdp6ptE9ri6+6tCOOahf099/v/Xog==",
"cpu": [
"arm64"
],
@@ -202,9 +202,9 @@
]
},
"node_modules/@rspack/binding-darwin-x64": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.2.tgz",
"integrity": "sha512-tOwxZpoPlTlRs/w6UyUinXJ4TYRVHMlR7+eQxO1R3muKpixvhXQjtvoaY16HuFyTVky5F0IfOoWr3x9FEsgdLg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-darwin-x64/-/binding-darwin-x64-2.0.1.tgz",
"integrity": "sha512-2vvBNBoS09/PurupBwSrlTZd8283o00B8v20ncsNUdEff41uCR/hzIrYoTIVWnVST+Gt5O1+cfcfORp397lajg==",
"cpu": [
"x64"
],
@@ -216,9 +216,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-gnu": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.2.tgz",
"integrity": "sha512-1ZD4YFhG1rmgqj+W8hfwHyKV8xDxGsc/3KgU0FwmiVEX7JfzhCkgBO/xlCG79kRKSrzuVzt4icO/G3cCKn0pag==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-2.0.1.tgz",
"integrity": "sha512-uvNXk6ahE3AH3h2avnd1Mgno68YQpS4cfX1OkOGWIC/roL+NrOP2XVXV4yfVAoydPALDO7AfbIfN0QdmBK3rsA==",
"cpu": [
"arm64"
],
@@ -233,9 +233,9 @@
]
},
"node_modules/@rspack/binding-linux-arm64-musl": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.2.tgz",
"integrity": "sha512-/PtTkM/DsDLjeuXTmeJeRfbjCDbcL9jvoVgZrgxYFZ28y2cdLvbChbW9uigOzs5dQEs1CIBQXMTTj7KhdBTuQg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-arm64-musl/-/binding-linux-arm64-musl-2.0.1.tgz",
"integrity": "sha512-S/a6uN9PiZ5O/PjSqyIXhuRC1lVzeJkJV69NeLk5sIEUiDQ/aQGZG97uN+tluwpbo1tPbLJkdHYETfjspOX4Pg==",
"cpu": [
"arm64"
],
@@ -250,9 +250,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-gnu": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.2.tgz",
"integrity": "sha512-bBjsZxMHRaPo6X9SokApm6ucs+UhXtAJFyJJyuk2BH4XJsLeCU9Dz1vMwioeohFbJUUeTASVPm6/BL+RhSaunw==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-gnu/-/binding-linux-x64-gnu-2.0.1.tgz",
"integrity": "sha512-C13Kk0OkZiocZVj187Sf753UH6pDXnuEu6vzUvi3qv9ltibG1ki0H2Y8isXBYL2cHQOV+hk0g1S6/4z3TTB97A==",
"cpu": [
"x64"
],
@@ -267,9 +267,9 @@
]
},
"node_modules/@rspack/binding-linux-x64-musl": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.2.tgz",
"integrity": "sha512-HjlpInqzabDNkhVsUJpsHPqa9QYVWBViJoyWNjzXCAW0vKMDvwaphyUvokSinX8FGTlZi/sr5UEaHJo6XtQ35g==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-linux-x64-musl/-/binding-linux-x64-musl-2.0.1.tgz",
"integrity": "sha512-TQsiBFpEDGkuvK9tNdGj/Uc+AIytzqhxXH/1jKU6M24cWB1DTw/Cx7DdrkCBDyq3129K3POLdujvbWCGqBzQUw==",
"cpu": [
"x64"
],
@@ -284,9 +284,9 @@
]
},
"node_modules/@rspack/binding-wasm32-wasi": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.2.tgz",
"integrity": "sha512-YaRYNFLJRpkGfYjSWR7n9f+nQKtrlmrrffpAn/blc2geHcRvXoBc5SCs1idPtsLhj7H9qWWhs7ucjyHy4csWFg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-wasm32-wasi/-/binding-wasm32-wasi-2.0.1.tgz",
"integrity": "sha512-wk3gyUgBW/ayP49bI54bkY8+EQnfBHxdoe9dz3oobSTZQc8AOWwmUUDEPltW8rUvPOM6dfHECTOUMnfaf2f5yA==",
"cpu": [
"wasm32"
],
@@ -300,9 +300,9 @@
}
},
"node_modules/@rspack/binding-win32-arm64-msvc": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.2.tgz",
"integrity": "sha512-d/3kTEKq+asLjRFPO96t+wfWiM7DLN76VQEPDD9bc1kdsZXlVJBuvyXfsgK8bbEvKplWXYcSsokhmEnuXrLOpg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-2.0.1.tgz",
"integrity": "sha512-rHjLcy3VcAC3+x+PxH+gwhwv6tPe0JdXTNT5eAOs9wgZIM6T9p4wre49+K4Qy98+Fb7TTbLX0ObUitlOkGwTSA==",
"cpu": [
"arm64"
],
@@ -314,9 +314,9 @@
]
},
"node_modules/@rspack/binding-win32-ia32-msvc": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.2.tgz",
"integrity": "sha512-161cWineq3RW+Jdm1FAfSpXeUtYWvhB3kAbm46vNT9h/YYz+spwsFMvveAZ1nsVSVL0IC5lDBGUte7yUAY8K2g==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-2.0.1.tgz",
"integrity": "sha512-Ad1vVqMBBnd4T8rsORngu9sl2kyRTlS4kMlvFudjzl1X2UFArEDBe0YVGNN7ZvahM12CErUx2WiN8Sd8pb+qXQ==",
"cpu": [
"ia32"
],
@@ -328,9 +328,9 @@
]
},
"node_modules/@rspack/binding-win32-x64-msvc": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.2.tgz",
"integrity": "sha512-y7Q0S1FE+OlkL5GMqLG0PwxrPw6E1r892KhGrGKE1Vdufe5YTEx6xTPxzZ+b7N2KPD7s9G1/iJmWHQxb1+Bjkg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/binding-win32-x64-msvc/-/binding-win32-x64-msvc-2.0.1.tgz",
"integrity": "sha512-oPM2Jtm7HOlmxl/aBfleAVlL6t9VeHx6WvEets7BBJMInemFXAQd4CErRqybf7rXutACzLeUWBOue4Jpd1/ykw==",
"cpu": [
"x64"
],
@@ -342,13 +342,13 @@
]
},
"node_modules/@rspack/core": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.2.tgz",
"integrity": "sha512-VM3UHOo26uC+4QSqY5tU1ybI7KuXY5rTof8nhFOaBY9SYau0Smvr+hMSAPmrmHwknB6dXT8yaNVxrj7I+qxE1Q==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@rspack/core/-/core-2.0.1.tgz",
"integrity": "sha512-lgfZiExh8kDR/3obgi3RQKwKG5av1Xf5qDN1aVde777W9pbmx0Pqvrww1qtNvJ+gobEjbrrn5HEZWYGe0VLmcA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rspack/binding": "2.0.2"
"@rspack/binding": "2.0.1"
},
"engines": {
"node": "^20.19.0 || >=22.12.0"
@@ -383,20 +383,20 @@
}
},
"node_modules/@rspress/core": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.11.tgz",
"integrity": "sha512-4YBOFmSMFv5GWrCa80qSIW8VxqZQQS/PknVq2r7Hb7kgfB38Fzciopn3hjb3hNwI4TTRbsi/Jev2HyRWD4bYAQ==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.10.tgz",
"integrity": "sha512-DvoV7YUW538x0CVAGyYPKfjUHgEuq7Z8LZq1cpfUgBpA1DynFUK3Ls6spvdoAHAl3l0AN+xxOHpu/sRVhzqi/A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@mdx-js/mdx": "^3.1.1",
"@mdx-js/react": "^3.1.1",
"@rsbuild/core": "^2.0.5",
"@rsbuild/core": "^2.0.2",
"@rsbuild/plugin-react": "~2.0.0",
"@rspress/shared": "2.0.11",
"@rspress/shared": "2.0.10",
"@shikijs/rehype": "^4.0.2",
"@types/unist": "^3.0.3",
"@unhead/react": "^2.1.15",
"@unhead/react": "^2.1.13",
"body-scroll-lock": "4.0.0-beta.0",
"clsx": "2.1.1",
"copy-to-clipboard": "^3.3.3",
@@ -407,12 +407,12 @@
"mdast-util-mdxjs-esm": "^2.0.1",
"medium-zoom": "1.1.0",
"nprogress": "^0.2.0",
"react": "^19.2.6",
"react-dom": "^19.2.6",
"react": "^19.2.5",
"react-dom": "^19.2.5",
"react-lazy-with-preload": "^2.2.1",
"react-reconciler": "0.33.0",
"react-render-to-markdown": "19.0.1",
"react-router-dom": "^7.15.0",
"react-router-dom": "^7.13.2",
"rehype-external-links": "^3.0.0",
"rehype-raw": "^7.0.0",
"remark-cjk-friendly": "^2.0.1",
@@ -436,9 +436,9 @@
}
},
"node_modules/@rspress/plugin-client-redirects": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.11.tgz",
"integrity": "sha512-DI9vod5mGccg57c19CuFpN3mGP1FEEueOUnEUz1UHXSyXg9YTj+ox7Xla4jUUzAzoPVGiWSSsfbtCTwdoxAsbg==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.10.tgz",
"integrity": "sha512-ImOm3h/cbXiJXIvpwv3Wn9rM91xgdhKbD2WX+WlMlWO4AtQfKR4XFrVhIZZAkrt09eeotRIklA7nu8Nuzzzbsw==",
"dev": true,
"license": "MIT",
"engines": {
@@ -449,9 +449,9 @@
}
},
"node_modules/@rspress/plugin-sitemap": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.11.tgz",
"integrity": "sha512-046LCHgbJXdaPipWB2SWMjZcAtIrOjXGZOD92xlTjhZ74D7Mk1Nod1MQdtOEoISWedcHdgpUVXMDbB1doKBpPQ==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.10.tgz",
"integrity": "sha512-PZLig9+OlnyLcy6x9BlEqWSRef6TzDWB6Dlh2/hY41FtKlhyb7d7U56RGlLselWaQV54SHVa6H/y611A56ZI2g==",
"dev": true,
"license": "MIT",
"engines": {
@@ -462,13 +462,13 @@
}
},
"node_modules/@rspress/shared": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.11.tgz",
"integrity": "sha512-7l5Pso4s597utJyisVEnd7n/40h053nfE8DwGQMeS8RLGtSwVgxFwNHsSrvQEGtFlLrg2aWWSITqnAVO1wfTew==",
"version": "2.0.10",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.10.tgz",
"integrity": "sha512-Kx10OAHWqi2jvW7ScmBUbkGjnwv4E6rEoelUchcL8It8nQ4nAVk0xvvES7m64knEon55zDbs8JQumCjbHu801Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rsbuild/core": "^2.0.5",
"@rsbuild/core": "^2.0.2",
"@shikijs/rehype": "^4.0.2",
"unified": "^11.0.5"
}
@@ -610,9 +610,9 @@
}
},
"node_modules/@tybys/wasm-util": {
"version": "0.10.2",
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.2.tgz",
"integrity": "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==",
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz",
"integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
"dev": true,
"license": "MIT",
"optional": true,
@@ -707,13 +707,13 @@
"license": "ISC"
},
"node_modules/@unhead/react": {
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.15.tgz",
"integrity": "sha512-5hfAaZ3XJq9JkspRzZdSPsMrXXA8v/SKiEOxZcN9L40o44byF/50bcQuOLgSSCAx8802mI5VG32KZXWTtsLu9Q==",
"version": "2.1.13",
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.13.tgz",
"integrity": "sha512-gC48tNJ0UtbithkiKCc2WUlxbVVk5o171EtruS2w2hQUblfYFHzCPu2hljjT1e0tUHXXqN8EMv7mpxHddMB2sg==",
"dev": true,
"license": "MIT",
"dependencies": {
"unhead": "2.1.15"
"unhead": "2.1.13"
},
"funding": {
"url": "https://github.com/sponsors/harlan-zw"
@@ -2753,9 +2753,9 @@
}
},
"node_modules/react": {
"version": "19.2.6",
"resolved": "https://registry.npmjs.org/react/-/react-19.2.6.tgz",
"integrity": "sha512-sfWGGfavi0xr8Pg0sVsyHMAOziVYKgPLNrS7ig+ivMNb3wbCBw3KxtflsGBAwD3gYQlE/AEZsTLgToRrSCjb0Q==",
"version": "19.2.5",
"resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz",
"integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==",
"dev": true,
"license": "MIT",
"engines": {
@@ -2763,16 +2763,16 @@
}
},
"node_modules/react-dom": {
"version": "19.2.6",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.6.tgz",
"integrity": "sha512-0prMI+hvBbPjsWnxDLxlCGyM8PN6UuWjEUCYmZhO67xIV9Xasa/r/vDnq+Xyq4Lo27g8QSbO5YzARu0D1Sps3g==",
"version": "19.2.5",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.5.tgz",
"integrity": "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag==",
"dev": true,
"license": "MIT",
"dependencies": {
"scheduler": "^0.27.0"
},
"peerDependencies": {
"react": "^19.2.6"
"react": "^19.2.5"
}
},
"node_modules/react-lazy-with-preload": {
@@ -2822,9 +2822,9 @@
}
},
"node_modules/react-router": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.15.0.tgz",
"integrity": "sha512-HW9vYwuM8f4yx66Izy8xfrzCM+SBJluoZcCbww9A1TySax11S5Vgw6fi3ZjMONw9J4gQwngL7PzkyIpJJpJ7RQ==",
"version": "7.14.2",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.14.2.tgz",
"integrity": "sha512-yCqNne6I8IB6rVCH7XUvlBK7/QKyqypBFGv+8dj4QBFJiiRX+FG7/nkdAvGElyvVZ/HQP5N19wzteuTARXi5Gw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2845,13 +2845,13 @@
}
},
"node_modules/react-router-dom": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.15.0.tgz",
"integrity": "sha512-VcrVg64Fo8nwBvDscajG8gRTLIuTC6N50nb22l2HOOV4PTOHgoGp8mUjy9wLiHYoYTSYI36tUnXZgasSRFZorQ==",
"version": "7.14.2",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.14.2.tgz",
"integrity": "sha512-YZcM5ES8jJSM+KrJ9BdvHHqlnGTg5tH3sC5ChFRj4inosKctdyzBDhOyyHdGk597q2OT6NTrCA1OvB/YDwfekQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"react-router": "7.15.0"
"react-router": "7.14.2"
},
"engines": {
"node": ">=20.0.0"
@@ -3290,9 +3290,9 @@
}
},
"node_modules/unhead": {
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.15.tgz",
"integrity": "sha512-MCt5T90mCWyr3Z6pUCdM9lVRXoMoVBlL7z7U4CYVIiaDiuzad/UCfLuMqz5MeNmpZUgoBCQnrucJimU7EZR+XA==",
"version": "2.1.13",
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.13.tgz",
"integrity": "sha512-jO9M1sI6b2h/1KpIu4Jeu+ptumLmUKboRRLxys5pYHFeT+lqTzfNHbYUX9bxVDhC1FBszAGuWcUVlmvIPsah8Q==",
"dev": true,
"license": "MIT",
"dependencies": {
+213 -9
View File
@@ -1,5 +1,5 @@
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
fmt::Write,
iter::once,
time::{Instant, SystemTime},
@@ -22,7 +22,7 @@
use lettre::message::Mailbox;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, UInt,
api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw,
};
use service::rooms::{
@@ -69,6 +69,189 @@ pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
self.write_str(&out).await
}
/// Render status of a node in the auth-chain mermaid graph.
///
/// The inner `bool` is `true` when the event is missing locally, i.e. we
/// only know it by reference and could not load its PDU.
#[derive(Clone, Copy, Eq, PartialEq)]
enum NodeStatus {
	/// Event with no rejection/soft-fail marker.
	Normal(bool),
	/// Event marked soft-failed in pdu_metadata.
	SoftFailed(bool),
	/// Event marked rejected in pdu_metadata (takes precedence over
	/// soft-failed when both markers are set).
	Rejected(bool),
}
/// An auth event queued for rendering/traversal, carrying the keys used to
/// order siblings deterministically in the output graph.
struct AuthChild {
	// Mermaid node identifier (e.g. "n3") assigned to this event.
	node_id: String,
	event_id: OwnedEventId,
	// Primary sort key; UInt::MAX when the PDU is unavailable locally.
	depth: UInt,
	// origin_server_ts; tiebreaker after depth, before event-ID ordering.
	ts: UInt,
	// True the first time this event was scheduled; repeat references only
	// get an edge, not a second traversal.
	first_seen: bool,
	// The PDU when available locally; None for missing events.
	pdu: Option<PduEvent>,
}
/// Emits one mermaid node declaration for `event_id` into `graph`,
/// labelling it with its status and tagging rejected/soft-failed nodes
/// with the matching `classDef` class so they are coloured.
fn render_node(
	graph: &mut String,
	node_id: &str,
	event_id: &EventId,
	status: NodeStatus,
) -> Result {
	// Label suffix appended to the event ID, and the optional mermaid class
	// used to style the node.
	let (suffix, class) = match status {
		| NodeStatus::Normal(false) => ("", None),
		| NodeStatus::Normal(true) => (" (missing locally)", None),
		| NodeStatus::SoftFailed(false) => (" (soft-failed)", Some("soft_failed")),
		| NodeStatus::SoftFailed(true) =>
			(" (soft-failed & missing locally)", Some("soft_failed")),
		| NodeStatus::Rejected(false) => (" (rejected)", Some("rejected")),
		| NodeStatus::Rejected(true) => (" (rejected & missing locally)", Some("rejected")),
	};

	writeln!(graph, "{node_id}[\"{event_id}{suffix}\"]")?;
	if let Some(class) = class {
		writeln!(graph, "class {node_id} {class};")?;
	}

	Ok(())
}
// Walks the auth chain of `event_id` depth-first and renders it as a mermaid
// flowchart, colouring rejected and soft-failed events via render_node.
// Developer/debugging tooling; output is pasteable into mermaid.live.
#[admin_command]
pub(super) async fn show_auth_chain(&self, event_id: OwnedEventId) -> Result {
	// Classify an event for rendering. Rejected takes precedence over
	// soft-failed; `missing` records whether we lack the PDU locally.
	let node_status = async |event_id: &EventId, missing: bool| -> NodeStatus {
		if self
			.services
			.rooms
			.pdu_metadata
			.is_event_rejected(event_id)
			.await
		{
			NodeStatus::Rejected(missing)
		} else if self
			.services
			.rooms
			.pdu_metadata
			.is_event_soft_failed(event_id)
			.await
		{
			NodeStatus::SoftFailed(missing)
		} else {
			NodeStatus::Normal(missing)
		}
	};

	// The root event must exist locally; descendants may be missing.
	let Ok(root) = self.services.rooms.timeline.get_pdu(&event_id).await else {
		return Err!("Event not found.");
	};

	// Mermaid preamble; the classDefs provide the rejected/soft-failed
	// colours referenced by render_node's `class` lines.
	let mut graph = String::from(
		"```mermaid\n%% This is a mermaid graph. You can plug this output into\n\
		%% https://mermaid.live/edit to visualise it on-the-fly.\nflowchart TD\n\
		classDef rejected fill:#ffe5e5,stroke:#cc0000,stroke-width:2px,color:#000;\n\
		classDef soft_failed fill:#fff6cc,stroke:#c9a400,stroke-width:2px,color:#000;\n"
	);

	// Stable mermaid node ids per event.
	let mut node_ids: HashMap<OwnedEventId, String> = HashMap::new();
	// PDU cache: events (like the create event) are referenced repeatedly.
	let mut cached_events: HashMap<OwnedEventId, PduEvent> =
		HashMap::from([(event_id.clone(), root.clone())]);
	// Events already scheduled at least once (drives `first_seen`).
	let mut scheduled: HashSet<OwnedEventId> = HashSet::from([event_id.clone()]);
	// Events whose node + outgoing edges were already emitted.
	let mut visited: HashSet<OwnedEventId> = HashSet::new();
	// DFS stack of locally-available PDUs still to expand.
	let mut stack = vec![root];
	let mut next_node_id = 0_usize;

	// Returns the mermaid node id ("n0", "n1", ...) for an event,
	// allocating a fresh one on first use.
	let node_id_for = |event_id: &OwnedEventId,
	                   node_ids: &mut HashMap<OwnedEventId, String>,
	                   next_node_id: &mut usize| {
		node_ids
			.entry(event_id.clone())
			.or_insert_with(|| {
				let id = format!("n{}", *next_node_id);
				*next_node_id = next_node_id.saturating_add(1);
				id
			})
			.clone()
	};

	while let Some(event) = stack.pop() {
		let current_event_id = event.event_id().to_owned();
		// The auth DAG has diamonds; skip nodes already expanded.
		if !visited.insert(current_event_id.clone()) {
			continue;
		}
		let current_node_id = node_id_for(&current_event_id, &mut node_ids, &mut next_node_id);
		let current_status = node_status(&current_event_id, false).await;
		render_node(&mut graph, &current_node_id, &current_event_id, current_status)?;
		let mut children = Vec::with_capacity(event.auth_events.len());
		for auth_event_id in event.auth_events().rev() {
			let auth_event_id = auth_event_id.to_owned();
			let auth_node_id = node_id_for(&auth_event_id, &mut node_ids, &mut next_node_id);
			// Always emit the edge, even for repeat references.
			writeln!(graph, "{current_node_id} --> {auth_node_id}")?;
			let first_seen = scheduled.insert(auth_event_id.clone());
			let auth_pdu = if let Some(auth_pdu) = cached_events.get(&auth_event_id) {
				// NOTE: events might be referenced multiple times (like the create event)
				// so this saves some cheeky db lookup time
				Some(auth_pdu.clone())
			} else if first_seen {
				// Only hit the db the first time we see an event.
				match self.services.rooms.timeline.get_pdu(&auth_event_id).await {
					| Ok(auth_event) => {
						cached_events.insert(auth_event_id.clone(), auth_event.clone());
						Some(auth_event)
					},
					| Err(_) => None,
				}
			} else {
				None
			};
			// NOTE: Depth is used as the primary sorting key here, even though it has no
			// bearing on state resolution or anything. Timestamp is used as a
			// tiebreaker, failing back to lexicographical comparison.
			let (depth, ts) = auth_pdu
				.as_ref()
				.map_or((UInt::MAX, UInt::MAX), |pdu| (pdu.depth, pdu.origin_server_ts));
			children.push(AuthChild {
				node_id: auth_node_id,
				event_id: auth_event_id,
				depth,
				ts,
				first_seen,
				pdu: auth_pdu,
			});
		}
		children.sort_by(|a, b| {
			a.depth
				.cmp(&b.depth)
				.then(a.ts.cmp(&b.ts))
				.then(a.event_id.as_str().cmp(b.event_id.as_str()))
		});
		// Reversed push so the first child in sort order ends up on top of
		// the stack and is expanded first.
		for child in children.into_iter().rev() {
			if !child.first_seen {
				continue;
			}
			if let Some(child_pdu) = child.pdu {
				// We have this PDU so will want to traverse it.
				stack.push(child_pdu);
			} else {
				// We don't have this PDU locally so we can't traverse its auth events,
				// but we can still render it as a node.
				render_node(
					&mut graph,
					&child.node_id,
					&child.event_id,
					node_status(&child.event_id, true).await,
				)?;
			}
		}
	}
	graph.push_str("```\n");
	self.write_str(&graph).await
}
#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result {
if self.body.len() < 2
@@ -111,15 +294,31 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
outlier = true;
pdu_json = self.services.rooms.timeline.get_pdu_json(&event_id).await;
}
let rejected = self
.services
.rooms
.pdu_metadata
.is_event_rejected(&event_id)
.await;
let soft_failed = self
.services
.rooms
.pdu_metadata
.is_event_soft_failed(&event_id)
.await;
match pdu_json {
| Err(_) => return Err!("PDU not found locally."),
| Ok(json) => {
let text = serde_json::to_string_pretty(&json)?;
let msg = if outlier {
"Outlier (Rejected / Soft Failed) PDU found in our database"
let msg = if rejected {
"Rejected PDU:"
} else if soft_failed {
"Soft-failed PDU:"
} else if outlier {
"Outlier PDU:"
} else {
"PDU found in our database"
"PDU:"
};
write!(self, "{msg}\n```json\n{text}\n```")
},
@@ -614,6 +813,10 @@ pub(super) async fn force_set_room_state_from_server(
.await;
state.insert(shortstatekey, pdu.event_id.clone());
self.services
.rooms
.pdu_metadata
.clear_pdu_markers(pdu.event_id());
}
}
@@ -631,6 +834,10 @@ pub(super) async fn force_set_room_state_from_server(
.rooms
.outlier
.add_pdu_outlier(&event_id, &value);
self.services
.rooms
.pdu_metadata
.clear_pdu_markers(&event_id);
}
info!("Resolving new room state");
@@ -662,10 +869,7 @@ pub(super) async fn force_set_room_state_from_server(
.force_state(room_id.clone().as_ref(), short_state_hash, added, removed, &state_lock)
.await?;
info!(
"Updating joined counts for room just in case (e.g. we may have found a difference in \
the room's m.room.member state"
);
info!("Updating joined counts for room");
self.services
.rooms
.state_cache
+10 -1
View File
@@ -17,12 +17,21 @@ pub enum DebugCommand {
message: Vec<String>,
},
/// Get the auth_chain of a PDU
/// Loads the auth_chain of a PDU, reporting how long it took.
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: OwnedEventId,
},
/// Walks & displays the auth_chain of a PDU in a mermaid graph format.
///
/// This is useless to basically anyone but developers, and is also probably
/// slow and memory hungry.
ShowAuthChain {
/// The root event ID to start walking back from.
event_id: OwnedEventId,
},
/// Parse and print a PDU from a JSON
///
/// The PDU event is only checked for validity and is not added to the
+3 -13
View File
@@ -1,8 +1,7 @@
use clap::Subcommand;
use conduwuit::Result;
use conduwuit_database::Deserialized as _;
use futures::StreamExt;
use ruma::{OwnedRoomId, OwnedUserId, exports::serde::Serialize};
use ruma::{OwnedRoomId, OwnedUserId};
use crate::{admin_command, admin_command_dispatch};
@@ -59,22 +58,13 @@ async fn account_data_get(
room_id: Option<OwnedRoomId>,
) -> Result {
let timer = tokio::time::Instant::now();
let result = self
let results = self
.services
.account_data
.get_raw(room_id.as_deref(), &user_id, &kind)
.await;
let query_time = timer.elapsed();
let json = serde_json::to_string_pretty(&match room_id {
| None => result
.deserialized::<ruma::serde::Raw<ruma::events::AnyGlobalAccountDataEvent>>()?
.serialize(serde_json::value::Serializer)?,
| Some(_) => result
.deserialized::<ruma::serde::Raw<ruma::events::AnyRoomAccountDataEvent>>()?
.serialize(serde_json::value::Serializer)?,
})?;
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{json}\n```"))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
.await
}
+35
View File
@@ -763,6 +763,41 @@ pub(super) async fn force_join_room(
.await
}
// Joins a local user to a room through a specific remote server, regardless
// of whether this server already resides in the room.
#[admin_command]
pub(super) async fn force_join_room_remotely(
	&self,
	user_id: String,
	room_id: OwnedRoomOrAliasId,
	via: String,
) -> Result {
	let services = self.services;
	let via = ServerName::parse(&via)?;
	let user_id = parse_local_user_id(services, &user_id)?;
	// parse_local_user_id already guarantees locality; this is a sanity
	// check on that invariant.
	assert!(
		services.globals.user_is_local(&user_id),
		"Parsed user_id must be a local user"
	);

	// Resolve the alias (or pass the room ID through) together with any
	// candidate servers discovered during resolution.
	let (room_id, mut servers) = services
		.rooms
		.alias
		.resolve_with_servers(&room_id, None)
		.await?;

	// Ensure `via` is tried first: drop any existing occurrence, then
	// prepend it.
	servers.retain(|name| *name != via);
	servers.insert(0, via);

	let state_lock = services.rooms.state.mutex.lock(&*room_id).await;
	services
		.rooms
		.membership
		.join_remote_room(&user_id, &room_id, None, &servers, state_lock)
		.await?;

	self.write_str(&format!("{user_id} has been joined to {room_id}."))
		.await
}
#[admin_command]
pub(super) async fn force_leave_room(
&self,
+14
View File
@@ -183,6 +183,20 @@ pub enum UserCommand {
room_id: OwnedRoomOrAliasId,
},
/// Manually join a local user to a room via a remote server, regardless of
/// our current residency.
ForceJoinRoomRemotely {
/// The user to join
user_id: String,
/// The room to join
room_id: OwnedRoomOrAliasId,
/// The server name to join via.
///
/// This server will always be tried first, however if more are
/// available, they may be tried after.
via: String,
},
/// Manually leave a local user from a room.
ForceLeaveRoom {
user_id: String,
+3 -1
View File
@@ -187,7 +187,7 @@ pub(crate) async fn change_password_route(
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!("User {} changed their password.", &sender_user))
.notice(&format!("User {sender_user} changed their password."))
.await;
}
@@ -322,6 +322,8 @@ pub(crate) async fn check_registration_token_validity(
/// Runs through all the deactivation steps:
///
/// - Mark as deactivated
/// - Removing display name
/// - Removing avatar URL and blurhash
/// - Removing all profile data
/// - Leaving all rooms (and forgets all of them)
pub async fn full_user_deactivate(
+12 -1
View File
@@ -21,6 +21,7 @@
},
media::create_content,
},
assign,
};
use service::media::mxc::Mxc;
@@ -75,7 +76,17 @@ pub(crate) async fn create_content_route(
return Err!(Request(Unknown("Failed to save uploaded media")));
}
Ok(create_content::v3::Response::new(mxc.to_string().into()))
let blurhash = body.generate_blurhash.then(|| {
services
.media
.create_blurhash(&body.file, content_type, filename)
.ok()
.flatten()
});
Ok(assign!(create_content::v3::Response::new(mxc.to_string().into()), {
blurhash: blurhash.flatten(),
}))
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
+1
View File
@@ -247,6 +247,7 @@ pub(crate) async fn invite_helper(
let mut content = RoomMemberEventContent::new(MembershipState::Invite);
content.displayname = services.users.displayname(recipient_user).await.ok();
content.avatar_url = services.users.avatar_url(recipient_user).await.ok();
content.blurhash = services.users.blurhash(recipient_user).await.ok();
content.is_direct = Some(is_direct);
content.reason = reason;
+2
View File
@@ -343,6 +343,7 @@ async fn knock_room_helper_local(
let mut content = RoomMemberEventContent::new(MembershipState::Knock);
content.displayname = services.users.displayname(sender_user).await.ok();
content.avatar_url = services.users.avatar_url(sender_user).await.ok();
content.blurhash = services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason.clone());
// Try normal knock first
@@ -526,6 +527,7 @@ async fn knock_room_helper_remote(
let mut knock_content = RoomMemberEventContent::new(MembershipState::Knock);
knock_content.displayname = services.users.displayname(sender_user).await.ok();
knock_content.avatar_url = services.users.avatar_url(sender_user).await.ok();
knock_content.blurhash = services.users.blurhash(sender_user).await.ok();
knock_content.reason = reason;
knock_event_stub.insert(
+4
View File
@@ -21,6 +21,10 @@ pub(crate) async fn get_mutual_rooms_route(
return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
}
if !services.users.exists(&body.user_id).await {
return Ok(mutual_rooms::unstable::Response::new(vec![]));
}
let mutual_rooms = services
.rooms
.state_cache
+15 -4
View File
@@ -23,7 +23,8 @@
/// # `GET /_matrix/client/v3/profile/{userId}`
///
/// Returns the user's profile information.
/// Returns the displayname, avatar_url, blurhash, and custom profile fields of
/// the user.
///
/// - If user is on another server and we do not have a local copy already,
/// fetch profile over federation.
@@ -321,9 +322,19 @@ async fn set_profile_field(
services.users.set_avatar_url(user_id, None);
},
| other =>
services
.users
.set_profile_key(user_id, other.field_name().as_str(), other.value()),
if other.field_name().as_str() == "blurhash" {
if let Some(Value::String(blurhash)) = other.value() {
services.users.set_blurhash(user_id, Some(blurhash));
} else {
services.users.set_blurhash(user_id, None);
}
} else {
services.users.set_profile_key(
user_id,
other.field_name().as_str(),
other.value(),
);
},
}
// If the user is local and changed their displayname or avatar_url, update it
+2 -4
View File
@@ -288,6 +288,7 @@ pub(crate) async fn create_room_route(
let mut join_event = RoomMemberEventContent::new(MembershipState::Join);
join_event.displayname = services.users.displayname(sender_user).await.ok();
join_event.avatar_url = services.users.avatar_url(sender_user).await.ok();
join_event.blurhash = services.users.blurhash(sender_user).await.ok();
join_event.is_direct = Some(body.is_direct);
debug_info!("Joining {sender_user} to room {room_id}");
@@ -536,10 +537,7 @@ pub(crate) async fn create_room_route(
if services.server.config.admin_room_notices {
services
.admin
.send_text(&format!(
"{sender_user} made {} public to the room directory",
&room_id
))
.send_text(&format!("{sender_user} made {room_id} public to the room directory"))
.await;
}
info!("{sender_user} made {0} public to the room directory", &room_id);
+1
View File
@@ -271,6 +271,7 @@ pub(crate) async fn upgrade_room_route(
&assign!(RoomMemberEventContent::new(MembershipState::Join), {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
}),
),
sender_user,
+44 -8
View File
@@ -1,8 +1,7 @@
use std::collections::BTreeMap;
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{
Result,
matrix::versions::{unstable_features, versions},
};
use conduwuit::Result;
use futures::StreamExt;
use ruma::{api::client::discovery::get_supported_versions, assign};
@@ -23,10 +22,47 @@
pub(crate) async fn get_supported_versions_route(
_body: Ruma<get_supported_versions::Request>,
) -> Result<get_supported_versions::Response> {
Ok(assign!(
get_supported_versions::Response::new(versions()),
{ unstable_features: unstable_features() }
))
let versions = vec![
"r0.0.1".to_owned(),
"r0.1.0".to_owned(),
"r0.2.0".to_owned(),
"r0.3.0".to_owned(),
"r0.4.0".to_owned(),
"r0.5.0".to_owned(),
"r0.6.0".to_owned(),
"r0.6.1".to_owned(),
"v1.1".to_owned(),
"v1.2".to_owned(),
"v1.3".to_owned(),
"v1.4".to_owned(),
"v1.5".to_owned(),
"v1.8".to_owned(),
"v1.11".to_owned(),
"v1.12".to_owned(),
"v1.13".to_owned(),
"v1.14".to_owned(),
];
let unstable_features = BTreeMap::from_iter([
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc2285.stable".to_owned(), true), /* private read receipts (https://github.com/matrix-org/matrix-spec-proposals/pull/2285) */
("uk.half-shot.msc2666.query_mutual_rooms".to_owned(), true), /* query mutual rooms (https://github.com/matrix-org/matrix-spec-proposals/pull/2666) */
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
("org.matrix.msc3814".to_owned(), true), /* dehydrated devices */
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */
("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */
("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
("org.matrix.msc4155".to_owned(), true), /* invite filtering (https://github.com/matrix-org/matrix-spec-proposals/pull/4155) */
]);
Ok(assign!(get_supported_versions::Response::new(versions), { unstable_features }))
}
/// # `GET /_conduwuit/server_version`
+9 -4
View File
@@ -2,7 +2,7 @@
use conduwuit::{Err, Result};
use ruma::{
api::client::discovery::{
discover_homeserver::{self, HomeserverInfo},
discover_homeserver::{self, HomeserverInfo, RtcFocusInfo},
discover_support::{self, Contact, ContactRole},
},
assign,
@@ -31,8 +31,10 @@ pub(crate) async fn well_known_client(
rtc_foci: services
.config
.matrix_rtc
.foci
.clone()
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
.into_iter()
.map(|focus| RtcFocusInfo::new(focus.transport_type(), focus.data().into_owned()).unwrap())
.collect()
}))
}
@@ -46,7 +48,10 @@ pub(crate) async fn get_rtc_transports(
_body: Ruma<ruma::api::client::rtc::transports::v1::Request>,
) -> Result<ruma::api::client::rtc::transports::v1::Response> {
Ok(ruma::api::client::rtc::transports::v1::Response::new(
services.config.matrix_rtc.foci.clone(),
services
.config
.matrix_rtc
.effective_foci(&services.config.well_known.rtc_focus_server_urls),
))
}
+1
View File
@@ -381,6 +381,7 @@ async fn handle_room(
.rooms
.event_handler
.handle_incoming_pdu(origin, room_id, &event_id, value, true)
.boxed()
.await
.map(|_| ());
results.push((event_id, result));
+74 -1
View File
@@ -20,7 +20,10 @@
use regex::RegexSet;
use ruma::{
OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId,
api::client::{discovery::discover_support::ContactRole, rtc::RtcTransport},
api::client::{
discovery::{discover_homeserver::RtcFocusInfo, discover_support::ContactRole},
rtc::transports::v1::RtcTransport,
},
};
use serde::{Deserialize, Serialize, de::IgnoredAny};
use url::Url;
@@ -2117,6 +2120,10 @@ pub struct Config {
#[serde(default)]
pub antispam: Option<Antispam>,
/// display: nested
#[serde(default)]
pub blurhashing: BlurhashConfig,
/// Configuration for MatrixRTC (MSC4143) transport discovery.
/// display: nested
#[serde(default)]
@@ -2190,6 +2197,43 @@ pub struct WellKnownConfig {
/// PGP key URI for server support contacts, to be served as part of the
/// MSC1929 server support endpoint.
pub support_pgp_key: Option<String>,
/// **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
///
/// A list of MatrixRTC foci URLs which will be served as part of the
/// MSC4143 client endpoint at /.well-known/matrix/client.
///
/// This option is deprecated and will be removed in a future release.
/// Please migrate to the new `[global.matrix_rtc]` config section.
///
/// default: []
#[serde(default)]
pub rtc_focus_server_urls: Vec<RtcFocusInfo>,
}
// Settings for the optional blurhash generation feature; rendered into the
// example config under [global.blurhashing].
#[derive(Clone, Copy, Debug, Deserialize, Default)]
#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")]
pub struct BlurhashConfig {
	/// blurhashing x component, 4 is recommended by https://blurha.sh/
	///
	/// default: 4
	#[serde(default = "default_blurhash_x_component")]
	pub components_x: u32,
	/// blurhashing y component, 3 is recommended by https://blurha.sh/
	///
	/// default: 3
	#[serde(default = "default_blurhash_y_component")]
	pub components_y: u32,
	/// Max raw size that the server will blurhash. This is the size of the
	/// image after converting it to raw data; it should be higher than the
	/// upload limit but not too high. The higher it is, the higher the
	/// potential load will be for clients requesting blurhashes. The default
	/// is 33.55MB. Setting it to 0 disables blurhashing.
	///
	/// default: 33554432
	#[serde(default = "default_blurhash_max_raw_size")]
	pub blurhash_max_raw_size: u64,
}
#[derive(Clone, Debug, Deserialize, Default)]
@@ -2213,6 +2257,25 @@ pub struct MatrixRtcConfig {
pub foci: Vec<RtcTransport>,
}
impl MatrixRtcConfig {
	/// Returns the effective foci, falling back to the deprecated
	/// `rtc_focus_server_urls` if the new config is empty.
	#[must_use]
	pub fn effective_foci(&self, deprecated_foci: &[RtcFocusInfo]) -> Vec<RtcTransport> {
		if !self.foci.is_empty() {
			// New-style `[global.matrix_rtc].foci` takes precedence.
			self.foci.clone()
		} else {
			// Convert each deprecated well-known focus entry into the new
			// RtcTransport representation.
			deprecated_foci
				.iter()
				.map(|focus| {
					// NOTE(review): the unwrap assumes RtcTransport::new cannot
					// fail for data that deserialized as RtcFocusInfo — confirm;
					// otherwise a bad legacy config entry panics here.
					RtcTransport::new(focus.focus_type().to_owned(), focus.data().into_owned())
						.unwrap()
				})
				.collect()
		}
	}
}
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
@@ -2723,3 +2786,13 @@ fn default_client_response_timeout() -> u64 { 120 }
fn default_client_shutdown_timeout() -> u64 { 15 }
fn default_sender_shutdown_timeout() -> u64 { 5 }
// blurhashing defaults recommended by https://blurha.sh/
// 2^25
pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 }
pub(super) fn default_blurhash_x_component() -> u32 { 4 }
pub(super) fn default_blurhash_y_component() -> u32 { 3 }
// end recommended & blurhashing defaults
+1 -1
View File
@@ -260,7 +260,7 @@ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{real_error}")
}
} else {
write!(f, "Request error: {}", &self.0)
write!(f, "Request error: {}", self.0)
}
}
}
+6 -1
View File
@@ -311,6 +311,11 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
key_size_hint: Some(48),
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "rejectedeventids",
key_size_hint: Some(48),
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "statehash_shortstatehash",
val_size_hint: Some(8),
@@ -376,7 +381,7 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
},
Descriptor {
name: "userid_blurhash",
..descriptor::DROPPED
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userid_dehydrateddevice",
+4
View File
@@ -47,6 +47,7 @@ default = [
"bindgen-runtime", # replace with bindgen-static on alpine
]
standard = [
"blurhashing",
"brotli_compression",
"element_hacks",
"gzip_compression",
@@ -70,6 +71,9 @@ full = [
"tokio_console",
]
blurhashing = [
"conduwuit-service/blurhashing",
]
brotli_compression = [
"conduwuit-api/brotli_compression",
"conduwuit-core/brotli_compression",
+6
View File
@@ -16,6 +16,10 @@ crate-type = [
]
[features]
blurhashing = [
"dep:image",
"dep:blurhash",
]
brotli_compression = [
"conduwuit-core/brotli_compression",
"reqwest/brotli",
@@ -115,6 +119,8 @@ tracing.workspace = true
url.workspace = true
webpage.workspace = true
webpage.optional = true
blurhash.workspace = true
blurhash.optional = true
recaptcha-verify = { version = "0.2.0", default-features = false }
reqwest_recaptcha = { package = "reqwest", version = "0.12.28", default-features = false, features = ["rustls-tls-native-roots-no-provider"] } # As long as recaptcha-verify's reqwest is outdated
yansi.workspace = true
+1 -1
View File
@@ -44,7 +44,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
db,
server: args.server.clone(),
bad_event_ratelimiter: Arc::new(SyncRwLock::new(HashMap::new())),
admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &args.server.name))
admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", args.server.name))
.expect("#admins:server_name is valid alias name"),
server_user: UserId::parse_with_server_name(
String::from("conduit"),
+1 -1
View File
@@ -37,7 +37,7 @@ pub struct PasswordReset<'a> {
}
impl MessageTemplate for PasswordReset<'_> {
fn subject(&self) -> String { format!("Password reset request for {}", &self.user_id) }
fn subject(&self) -> String { format!("Password reset request for {}", self.user_id) }
}
#[derive(Template)]
+179
View File
@@ -0,0 +1,179 @@
#[cfg(feature = "blurhashing")]
use conduwuit::config::BlurhashConfig as CoreBlurhashConfig;
use conduwuit::{Result, implement};
use super::Service;
/// Stub used when the crate is built without the `blurhashing` feature.
///
/// Always returns `Ok(None)` ("no blurhash produced") after emitting a
/// debug warning, so callers need no feature-gating of their own.
#[implement(Service)]
#[cfg(not(feature = "blurhashing"))]
pub fn create_blurhash(
    &self,
    _file: &[u8],
    _content_type: Option<&str>,
    _file_name: Option<&str>,
) -> Result<Option<String>> {
    conduwuit::debug_warn!("blurhashing on upload support was not compiled");
    Ok(None)
}
/// Generates a blurhash string for an uploaded `file`.
///
/// Returns `Ok(None)` when blurhashing is disabled via configuration
/// (`blurhash_max_raw_size` set to 0); otherwise `Ok(Some(hash))` on
/// success. Encoding failures are wrapped into a project error via
/// `err!(debug_error!(..))`.
#[implement(Service)]
#[cfg(feature = "blurhashing")]
pub fn create_blurhash(
    &self,
    file: &[u8],
    content_type: Option<&str>,
    file_name: Option<&str>,
) -> Result<Option<String>> {
    // Snapshot the relevant knobs out of the server configuration.
    let config = BlurhashConfig::from(self.services.server.config.blurhashing);

    // since 0 means disabled blurhashing, skipped blurhashing
    if config.size_limit == 0 {
        return Ok(None);
    }

    get_blurhash_from_request(file, content_type, file_name, config)
        .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}")))
        .map(Some)
}
/// Returns the blurhash or a blurhash error which implements Display.
#[tracing::instrument(
    name = "blurhash",
    level = "debug",
    skip(data),
    fields(
        bytes = data.len(),
    ),
)]
#[cfg(feature = "blurhashing")]
fn get_blurhash_from_request(
    data: &[u8],
    mime: Option<&str>,
    filename: Option<&str>,
    config: BlurhashConfig,
) -> Result<String, BlurhashingError> {
    // Work out which image format the payload claims to be.
    let format = get_format_from_data_mime_and_filename(data, mime, filename)?;

    // Build a decoder for that format; the pixels are not decoded yet, so
    // the size check below stays cheap.
    let decoder = get_image_decoder_with_format_and_data(format, data)?;

    // Refuse to materialise images whose raw size exceeds the configured cap.
    if is_image_above_size_limit(&decoder, config) {
        return Err(BlurhashingError::ImageTooLarge);
    }

    // Decode the full bitmap only after the size check passed.
    blurhash_an_image(&image::DynamicImage::from_decoder(decoder)?, config)
}
/// Resolves the `image::ImageFormat` for an upload.
///
/// Preference order: the declared MIME type, then the file name's
/// extension, and finally content sniffing on the raw bytes. Assumes that
/// the MIME type and filename extension, when present, are not lying about
/// the actual file format.
#[cfg(feature = "blurhashing")]
fn get_format_from_data_mime_and_filename(
    data: &[u8],
    mime: Option<&str>,
    filename: Option<&str>,
) -> Result<image::ImageFormat, BlurhashingError> {
    let extension = filename
        .map(std::path::Path::new)
        .and_then(std::path::Path::extension)
        .map(std::ffi::OsStr::to_string_lossy);

    // Fix: the previous code fed the bare file extension (e.g. "png") into
    // `from_mime_type`, which expects a full MIME string ("image/png") and
    // therefore could never match; extensions must go through
    // `from_extension` instead.
    mime.and_then(image::ImageFormat::from_mime_type)
        .or_else(|| {
            extension
                .as_deref()
                .and_then(image::ImageFormat::from_extension)
        })
        .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok)
}
/// Builds a boxed decoder over `data`, pinned to the already-detected
/// `image_format` so the reader performs no re-sniffing of its own.
#[cfg(feature = "blurhashing")]
fn get_image_decoder_with_format_and_data(
    image_format: image::ImageFormat,
    data: &[u8],
) -> Result<Box<dyn image::ImageDecoder + '_>, BlurhashingError> {
    let cursor = std::io::Cursor::new(data);
    let mut reader = image::ImageReader::new(cursor);
    reader.set_format(image_format);
    let decoder = reader.into_decoder()?;
    Ok(Box::new(decoder))
}
/// Whether the image's raw (decoded) byte count meets or exceeds the
/// configured size limit.
#[cfg(feature = "blurhashing")]
fn is_image_above_size_limit<T: image::ImageDecoder>(
    decoder: &T,
    blurhash_config: BlurhashConfig,
) -> bool {
    // `total_bytes` reports the decoded size, which is what the limit caps.
    let raw_len = decoder.total_bytes();
    raw_len >= blurhash_config.size_limit
}
/// Encodes `image` into a blurhash string with the configured component
/// counts.
#[cfg(feature = "blurhashing")]
#[tracing::instrument(name = "encode", level = "debug", skip_all)]
#[inline]
fn blurhash_an_image(
    image: &image::DynamicImage,
    blurhash_config: BlurhashConfig,
) -> Result<String, BlurhashingError> {
    // The blurhash encoder operates on RGBA8 data, so convert first.
    let rgba = image.to_rgba8();
    let hash = blurhash::encode_image(
        blurhash_config.components_x,
        blurhash_config.components_y,
        &rgba,
    )?;
    Ok(hash)
}
/// Parameters for blurhash generation, copied out of the server
/// configuration (see the `From<CoreBlurhashConfig>` impl).
#[derive(Clone, Copy, Debug)]
pub struct BlurhashConfig {
    /// Number of horizontal blurhash components.
    pub components_x: u32,
    /// Number of vertical blurhash components.
    pub components_y: u32,
    /// size limit in bytes
    pub size_limit: u64,
}
#[cfg(feature = "blurhashing")]
impl From<CoreBlurhashConfig> for BlurhashConfig {
    /// Copies the blurhashing knobs out of the core server configuration.
    fn from(value: CoreBlurhashConfig) -> Self {
        Self {
            size_limit: value.blurhash_max_raw_size,
            components_x: value.components_x,
            components_y: value.components_y,
        }
    }
}
/// Failure modes of blurhash generation.
///
/// The whole enum is gated on the `blurhashing` feature, so no per-variant
/// `#[cfg]` is needed (the one previously on `ImageError` was redundant).
#[derive(Debug)]
#[cfg(feature = "blurhashing")]
pub enum BlurhashingError {
    /// The blurhash encoder itself reported an error (type-erased).
    HashingLibError(Box<dyn std::error::Error + Send>),
    /// The image could not be read or decoded; boxed to keep the enum small.
    ImageError(Box<image::ImageError>),
    /// The raw image size exceeded `blurhash_max_raw_size`.
    ImageTooLarge,
}
#[cfg(feature = "blurhashing")]
impl From<image::ImageError> for BlurhashingError {
    /// Boxes the (large) `image` error payload before wrapping it.
    fn from(value: image::ImageError) -> Self {
        BlurhashingError::ImageError(Box::new(value))
    }
}

#[cfg(feature = "blurhashing")]
impl From<blurhash::Error> for BlurhashingError {
    /// Wraps encoder failures behind a type-erased error object.
    fn from(value: blurhash::Error) -> Self {
        BlurhashingError::HashingLibError(Box::new(value))
    }
}
#[cfg(feature = "blurhashing")]
impl std::fmt::Display for BlurhashingError {
    /// Renders a human-readable description of the failure.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Fix: the prefix previously lacked a trailing space, producing
        // output like "Blurhash Error:Image was too large to blurhash".
        write!(f, "Blurhash Error: ")?;
        match &self {
            | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?,
            | Self::HashingLibError(e) =>
                write!(f, "There was an error with the blurhashing library => {e}")?,
            | Self::ImageError(e) =>
                write!(f, "There was an error with the image loading library => {e}")?,
        }

        Ok(())
    }
}
+1
View File
@@ -1,3 +1,4 @@
pub mod blurhash;
mod data;
pub(super) mod migrations;
pub mod mxc;
+1 -1
View File
@@ -48,7 +48,7 @@ pub fn is_valid(&self) -> bool {
impl std::fmt::Display for DatabaseTokenInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Token created by {} and used {} times. ", &self.creator, self.uses)?;
write!(f, "Token created by {} and used {} times. ", self.creator, self.uses)?;
if let Some(expires) = &self.expires {
write!(f, "{expires}.")?;
} else {
+1 -1
View File
@@ -35,7 +35,7 @@ pub struct ValidToken {
impl std::fmt::Display for ValidToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "`{}` --- {}", self.token, &self.source)
write!(f, "`{}` --- {}", self.token, self.source)
}
}
@@ -1,233 +1,454 @@
use std::{
collections::{BTreeMap, HashSet, VecDeque, hash_map},
cmp::min,
collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map},
time::Instant,
};
use conduwuit::{
Event, PduEvent, debug, debug_warn, implement, matrix::event::gen_event_id_canonical_json,
Err, Event, PduEvent, debug, debug_info, debug_warn, err,
matrix::event::gen_event_id_canonical_json, state_res::lexicographical_topological_sort,
trace, utils::continue_exponential_backoff_secs, warn,
};
use futures::StreamExt;
use ruma::{
CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName,
api::federation::event::get_event,
CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId,
OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt,
api::federation::event::{get_event, get_missing_events},
int,
};
use super::get_room_version_rules;
/// Find the event and auth it. Once the event is validated (steps 1 - 8)
/// it is appended to the outliers Tree.
/// Attempts to build a localised directed acyclic graph out of the given PDUs,
/// returning them in a topologically sorted order.
///
/// Returns pdu and if we fetched it over federation the raw json.
///
/// a. Look in the main timeline (pduid_pdu tree)
/// b. Look at outlier pdu tree
/// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation?
#[implement(super::Service)]
pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
&self,
origin: &'a ServerName,
events: Events,
create_event: &'a Pdu,
room_id: &'a RoomId,
) -> Vec<(PduEvent, Option<BTreeMap<String, CanonicalJsonValue>>)>
where
Pdu: Event + Send + Sync,
Events: Iterator<Item = &'a EventId> + Clone + Send,
{
let back_off = |id| match self
.services
.globals
.bad_event_ratelimiter
.write()
.entry(id)
{
| hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
| hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1.saturating_add(1));
},
};
/// This is used to attempt to process PDUs in an order that respects their
/// dependencies, however it is ultimately the sender's responsibility to send
/// them in a processable order, so this is just a best effort attempt. It does
/// not account for power levels or other tie breaks.
pub async fn build_local_dag<S: std::hash::BuildHasher>(
pdu_map: &HashMap<OwnedEventId, CanonicalJsonObject, S>,
) -> conduwuit::Result<Vec<OwnedEventId>> {
debug_assert!(pdu_map.len() >= 2, "needless call to build_local_dag with less than 2 PDUs");
let mut dag: HashMap<OwnedEventId, HashSet<OwnedEventId>> =
HashMap::with_capacity(pdu_map.len());
let mut id_origin_ts: HashMap<OwnedEventId, _> = HashMap::with_capacity(pdu_map.len());
let mut events_with_auth_events = Vec::with_capacity(events.clone().count());
trace!("Fetching {} outlier pdus", events.clone().count());
for (event_id, value) in pdu_map {
// We already checked that these properties are correct in parse_incoming_pdu,
// so it's safe to unwrap here.
// We also filter to remove any prev_events that are not in this pdu_map, as we
// need to have at least one event with zero out degrees for the lexico-topo
// sort below. If there are multiple events with omitted prevs, they will be
// ordered by timestamp, then event ID. At that point though, it's unlikely to
// matter.
let prev_events = value
.get("prev_events")
.unwrap()
.as_array()
.unwrap()
.iter()
.map(|v| EventId::parse(v.as_str().unwrap()).unwrap())
.filter(|id| pdu_map.contains_key(id))
.collect();
for id in events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await {
trace!("Found {id} in main timeline or outlier tree");
events_with_auth_events.push((id.to_owned(), Some(local_pdu), vec![]));
continue;
dag.insert(event_id.clone(), prev_events);
let origin_server_ts = value
.get("origin_server_ts")
.and_then(CanonicalJsonValue::as_integer)
.unwrap_or_default();
id_origin_ts.insert(event_id.clone(), origin_server_ts);
}
debug!(count = dag.len(), "Sorting incoming events with partial graph");
lexicographical_topological_sort(&dag, &async |node_id| {
// Note: we don't bother fetching power levels because that would massively slow
// this function down. This is a best-effort attempt to order events correctly
// for processing, however ultimately that should be the sender's job.
let ts = id_origin_ts
.get(&node_id)
.copied()
.unwrap_or_else(|| int!(0))
.to_string()
.parse::<u64>()
.ok()
.and_then(UInt::new)
.unwrap_or_default();
Ok((int!(0), MilliSecondsSinceUnixEpoch(ts)))
})
.await
.inspect(|sorted| {
debug_assert_eq!(
sorted.len(),
pdu_map.len(),
"Sorted graph was not the same size as the input graph"
);
})
.map_err(|e| err!("failed to resolve local graph: {e}"))
}
impl super::Service {
/// Uses `/_matrix/federation/v1/get_missing_events` to fill gaps in the
/// DAG.
///
/// When this function is called, the "earliest events" (current forward
/// extremities) will be collected, and the function will loop with an
/// exponentially incrementing limit (up to 100 per request) until it has
/// filled the gap, i.e. when the remote says there's no more events.
///
/// This function does not persist the events. The caller is responsible for
/// passing them through handle_incoming_pdu.
pub(super) async fn backfill_missing_events(
&self,
room_id: OwnedRoomId,
head: Vec<OwnedEventId>,
via: OwnedServerName,
) -> conduwuit::Result<HashMap<OwnedEventId, PduEvent>> {
if head.is_empty() {
return Ok(HashMap::new());
}
let tail = self
.services
.state
.get_forward_extremities(&room_id)
.collect::<Vec<_>>()
.await;
// TODO: min_depth is probably necessary to avoid fetching the entire room
// history if there are very long gaps
let mut latest_events: HashSet<OwnedEventId> = HashSet::from_iter(head.clone());
let mut loop_count: u64 = 3;
// Start with a base number of 3 so that we fetch 10, 16, 25, 36, etc
// instead of 1, 2, 4, 9, so on.
let mut backfilled_events = HashMap::with_capacity(10);
// c. Ask origin server over federation
// We also handle its auth chain here so we don't get a stack overflow in
// handle_outlier_pdu.
let mut todo_auth_events: VecDeque<_> = [id.to_owned()].into();
let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len());
while !latest_events.is_empty() {
let todo: Vec<OwnedEventId> = latest_events.clone().into_iter().collect();
let mut request =
get_missing_events::v1::Request::new(room_id.clone(), tail.clone(), todo.clone());
let limit = min(loop_count.saturating_pow(2), 100);
request.limit = limit
.try_into()
.expect("limit cannot be greater than 100, which fits into UInt");
let mut events_all = HashSet::with_capacity(todo_auth_events.len());
while let Some(next_id) = todo_auth_events.pop_front() {
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(&*next_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 60 * 2;
const MAX_DURATION: u64 = 60 * 60 * 8;
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug_warn!(
tried = ?*tries,
elapsed = ?time.elapsed(),
"Backing off from {next_id}",
);
continue;
}
}
if events_all.contains(&next_id) {
continue;
}
if self.services.timeline.pdu_exists(&next_id).await {
trace!("Found {next_id} in db");
continue;
}
debug!("Fetching {next_id} over federation from {origin}.");
match self
debug_info!(
backfilled=%backfilled_events.len(),
%loop_count,
"Asking {via} for up to {limit} missing events",
);
trace!(
?latest_events,
?tail,
%via,
%limit,
"Requesting missing events"
);
let response: get_missing_events::v1::Response = self
.services
.sending
.send_federation_request(
origin,
get_event::v1::Request::new((*next_id).to_owned()),
)
.await
{
| Ok(res) => {
debug!("Got {next_id} over federation from {origin}");
let Ok(room_version_rules) = get_room_version_rules(create_event) else {
back_off((*next_id).to_owned());
continue;
};
.send_federation_request(&via, request)
.await?;
loop_count = loop_count.saturating_add(1);
let Ok((calculated_event_id, value)) =
gen_event_id_canonical_json(&res.pdu, &room_version_rules)
else {
back_off((*next_id).to_owned());
continue;
};
if calculated_event_id != *next_id {
warn!(
"Server didn't return event id we requested: requested: {next_id}, \
we got {calculated_event_id}. Event: {:?}",
&res.pdu
);
}
if let Some(auth_events) = value
.get("auth_events")
.and_then(CanonicalJsonValue::as_array)
// Some buggy servers (including old continuwuity) may return the same events
// multiple times, which can cause this to be an infinite loop.
// In order to break this loop, if we see no new events from this response (i.e.
// all events in the response are already in backfilled_events), we stop,
// with a warning.
let mut unseen: usize = 0;
let chunk_len = response.events.len();
if response.events.is_empty() {
debug_info!("No more missing events found");
break;
}
for event in response.events {
trace!("Parsing incoming event from backfill");
let (incoming_room_id, event_id, pdu_json) =
self.parse_incoming_pdu(&event).await.map_err(|e| {
err!(BadServerResponse("{via} returned an invalid event: {e:?}"))
})?;
trace!(%incoming_room_id, %event_id, "Parsed incoming event from backfill");
if incoming_room_id != room_id {
return Err!(BadServerResponse(
"{via} returned {event_id} in missing events which belongs to \
{incoming_room_id}, not {room_id}"
));
}
latest_events.remove(&event_id);
if head.contains(&event_id) || tail.contains(&event_id) {
debug!("Skipping known event {event_id}");
continue;
}
if backfilled_events.contains_key(&event_id) {
debug_warn!(%via, %event_id, "Remote retransmitted event");
continue;
}
// TODO: Should this be scoped to the GME session? We might end up incorrectly
// assuming we've caught up if we do this
if let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await {
debug!(%via, %event_id, "Already seen event in database");
backfilled_events.insert(event_id.clone(), pdu);
} else {
unseen = unseen.saturating_add(1);
}
let parsed = PduEvent::from_id_val(&event_id, pdu_json)
.map_err(|e| err!(BadServerResponse("Unable to parse {event_id}: {e}")))?;
for prev_event_id in parsed.prev_events() {
// Verify that we have all of this event's prev_events. If we don't, add it to
// the work queue
if !(backfilled_events.contains_key(prev_event_id)
|| self.services.timeline.pdu_exists(prev_event_id).await)
{
for auth_event in auth_events {
match serde_json::from_value::<OwnedEventId>(
auth_event.clone().into(),
) {
| Ok(auth_event) => {
trace!(
"Found auth event id {auth_event} for event {next_id}"
);
todo_auth_events.push_back(auth_event);
},
| _ => {
warn!("Auth event id is not valid");
},
}
}
} else {
warn!("Auth event list invalid");
latest_events.insert(prev_event_id.to_owned());
break;
}
events_in_reverse_order.push((next_id.clone(), value));
events_all.insert(next_id);
},
| Err(e) => {
warn!("Failed to fetch auth event {next_id} from {origin}: {e}");
back_off((*next_id).to_owned());
},
continue;
}
backfilled_events.insert(event_id.clone(), parsed);
}
for event_id in todo {
latest_events.remove(&event_id);
if let hash_map::Entry::Vacant(e) = backfilled_events.entry(event_id.clone()) {
let evt = self.services.timeline.get_pdu(&event_id).await?;
e.insert(evt);
}
}
debug!(
count=%chunk_len,
new=%unseen,
remaining=%latest_events.len(),
"Got missing events"
);
if unseen == 0 {
debug_warn!("Didn't see any new events, breaking cycle");
break;
} else if chunk_len < usize::try_from(limit)? {
debug!(
"Got less than the limit number of events, assuming there's no more to fetch"
);
break;
}
}
events_with_auth_events.push((id.to_owned(), None, events_in_reverse_order));
debug_info!("Successfully fetched {} missing events from {via}", backfilled_events.len());
trace!("Missing_events: {backfilled_events:?}");
Ok(backfilled_events)
}
let mut pdus = Vec::with_capacity(events_with_auth_events.len());
for (id, local_pdu, events_in_reverse_order) in events_with_auth_events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Some(local_pdu) = local_pdu {
trace!("Found {id} in main timeline or outlier tree");
pdus.push((local_pdu.clone(), None));
}
/// Find the event and auth it. Once the event is validated (steps 1 - 8)
/// it is appended to the outliers Tree.
///
/// Returns pdu and if we fetched it over federation the raw json.
///
/// a. Look in the main timeline (pduid_pdu tree)
/// b. Look at outlier pdu tree
/// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation?
pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
&self,
origin: &'a ServerName,
events: Events,
create_event: &'a Pdu,
room_id: &'a RoomId,
) -> Vec<(PduEvent, Option<BTreeMap<String, CanonicalJsonValue>>)>
where
Pdu: Event + Send + Sync,
Events: Iterator<Item = &'a EventId> + Clone + Send,
{
let back_off = |id| match self
.services
.globals
.bad_event_ratelimiter
.write()
.entry(id)
{
| hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
| hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1.saturating_add(1));
},
};
for (next_id, value) in events_in_reverse_order.into_iter().rev() {
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(&*next_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug!("Backing off from {next_id}");
let mut events_with_auth_events = Vec::with_capacity(events.clone().count());
trace!("Fetching {} outlier pdus", events.clone().count());
for id in events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await {
trace!("Found {id} in main timeline or outlier tree");
events_with_auth_events.push((id.to_owned(), Some(local_pdu), vec![]));
continue;
}
// c. Ask origin server over federation
// We also handle its auth chain here so we don't get a stack overflow in
// handle_outlier_pdu.
let mut todo_auth_events: VecDeque<_> = [id.to_owned()].into();
let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len());
let mut events_all = HashSet::with_capacity(todo_auth_events.len());
while let Some(next_id) = todo_auth_events.pop_front() {
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(&*next_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 60 * 2;
const MAX_DURATION: u64 = 60 * 60 * 8;
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug_warn!(
tried = ?*tries,
elapsed = ?time.elapsed(),
"Backing off from {next_id}",
);
continue;
}
}
if events_all.contains(&next_id) {
continue;
}
if self.services.timeline.pdu_exists(&next_id).await {
trace!("Found {next_id} in db");
continue;
}
debug!("Fetching {next_id} over federation from {origin}.");
match self
.services
.sending
.send_federation_request(
origin,
get_event::v1::Request::new((*next_id).to_owned()),
)
.await
{
| Ok(res) => {
debug!("Got {next_id} over federation from {origin}");
let Ok(room_version_rules) = get_room_version_rules(create_event) else {
back_off((*next_id).to_owned());
continue;
};
let Ok((calculated_event_id, value)) =
gen_event_id_canonical_json(&res.pdu, &room_version_rules)
else {
back_off((*next_id).to_owned());
continue;
};
if calculated_event_id != *next_id {
warn!(
"Server didn't return event id we requested: requested: \
{next_id}, we got {calculated_event_id}. Event: {:?}",
&res.pdu
);
}
if let Some(auth_events) = value
.get("auth_events")
.and_then(CanonicalJsonValue::as_array)
{
for auth_event in auth_events {
match serde_json::from_value::<OwnedEventId>(
auth_event.clone().into(),
) {
| Ok(auth_event) => {
trace!(
"Found auth event id {auth_event} for event \
{next_id}"
);
todo_auth_events.push_back(auth_event);
},
| _ => {
warn!("Auth event id is not valid");
},
}
}
} else {
warn!("Auth event list invalid");
}
events_in_reverse_order.push((next_id.clone(), value));
events_all.insert(next_id);
},
| Err(e) => {
warn!("Failed to fetch auth event {next_id} from {origin}: {e}");
back_off((*next_id).to_owned());
},
}
}
trace!("Handling outlier {next_id}");
match Box::pin(self.handle_outlier_pdu(
origin,
create_event,
&next_id,
room_id,
value.clone(),
true,
))
.await
{
| Ok((pdu, json)) =>
if next_id == *id {
trace!("Handled outlier {next_id} (original request)");
pdus.push((pdu, Some(json)));
events_with_auth_events.push((id.to_owned(), None, events_in_reverse_order));
}
let mut pdus = Vec::with_capacity(events_with_auth_events.len());
for (id, local_pdu, events_in_reverse_order) in events_with_auth_events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Some(local_pdu) = local_pdu {
trace!("Found {id} in main timeline or outlier tree");
pdus.push((local_pdu.clone(), None));
}
for (next_id, value) in events_in_reverse_order.into_iter().rev() {
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(&*next_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug!("Backing off from {next_id}");
continue;
}
}
trace!("Handling outlier {next_id}");
match Box::pin(self.handle_outlier_pdu(
origin,
create_event,
&next_id,
room_id,
value.clone(),
true,
))
.await
{
| Ok((pdu, json)) =>
if next_id == *id {
trace!("Handled outlier {next_id} (original request)");
pdus.push((pdu, Some(json)));
},
| Err(e) => {
warn!("Authentication of event {next_id} failed: {e:?}");
back_off(next_id);
},
| Err(e) => {
warn!("Authentication of event {next_id} failed: {e:?}");
back_off(next_id);
},
}
}
}
trace!("Fetched and handled {} outlier pdus", pdus.len());
pdus
}
trace!("Fetched and handled {} outlier pdus", pdus.len());
pdus
}
+109 -119
View File
@@ -1,128 +1,118 @@
use std::{
collections::{BTreeMap, HashMap, HashSet, VecDeque},
iter::once,
};
use std::collections::{HashMap, HashSet, VecDeque};
use conduwuit::{
Event, PduEvent, Result, debug_warn, err, implement,
state_res::{self},
};
use futures::{FutureExt, future};
use ruma::{
CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName,
int, uint,
};
use conduwuit::{Event, PduEvent, debug, debug_info, error, trace};
use ruma::{OwnedEventId, RoomId, ServerName};
use super::check_room_id;
use crate::rooms::event_handler::build_local_dag;
#[implement(super::Service)]
#[tracing::instrument(
level = "debug",
skip_all,
fields(%origin),
)]
#[allow(clippy::type_complexity)]
pub(super) async fn fetch_prev<'a, Pdu, Events>(
&self,
origin: &ServerName,
create_event: &Pdu,
room_id: &RoomId,
first_ts_in_room: MilliSecondsSinceUnixEpoch,
initial_set: Events,
) -> Result<(
Vec<OwnedEventId>,
HashMap<OwnedEventId, (PduEvent, BTreeMap<String, CanonicalJsonValue>)>,
)>
where
Pdu: Event + Send + Sync,
Events: Iterator<Item = &'a EventId> + Clone + Send,
{
let num_ids = initial_set.clone().count();
let mut eventid_info = HashMap::new();
let mut graph: HashMap<OwnedEventId, _> = HashMap::with_capacity(num_ids);
let mut todo_outlier_stack: VecDeque<OwnedEventId> =
initial_set.map(ToOwned::to_owned).collect();
impl super::Service {
pub(super) async fn fetch_prevs(
&self,
room_id: &RoomId,
create_event: &PduEvent,
incoming_pdu: &PduEvent,
origin: &ServerName,
) -> conduwuit::Result<()> {
let mut queue: VecDeque<OwnedEventId> = VecDeque::new();
queue.push_back(incoming_pdu.event_id().to_owned());
let mut amount = 0;
while let Some(prev_event_id) = todo_outlier_stack.pop_front() {
self.services.server.check_running()?;
match self
.fetch_and_handle_outliers(
origin,
once(prev_event_id.as_ref()),
create_event,
room_id,
)
.boxed()
.await
.pop()
{
| Some((pdu, mut json_opt)) => {
check_room_id(room_id, &pdu)?;
let limit = self.services.server.config.max_fetch_prev_events;
if amount > limit {
debug_warn!("Max prev event limit reached! Limit: {limit}");
graph.insert(prev_event_id.clone(), HashSet::new());
continue;
}
if json_opt.is_none() {
json_opt = self
.services
.outlier
.get_outlier_pdu_json(&prev_event_id)
.await
.ok();
}
if let Some(json) = json_opt {
if pdu.origin_server_ts() > first_ts_in_room {
amount = amount.saturating_add(1);
for prev_prev in pdu.prev_events() {
if !graph.contains_key(prev_prev) {
todo_outlier_stack.push_back(prev_prev.to_owned());
}
}
graph.insert(
prev_event_id.clone(),
pdu.prev_events().map(ToOwned::to_owned).collect(),
);
} else {
// Time based check failed
graph.insert(prev_event_id.clone(), HashSet::new());
}
eventid_info.insert(prev_event_id.clone(), (pdu, json));
while let Some(event_id) = queue.pop_front() {
debug!(event_id=%incoming_pdu.event_id, "Fetching any missing prev_events");
let mut missing_prev_events: HashSet<OwnedEventId> =
incoming_pdu.prev_events().map(ToOwned::to_owned).collect();
for pid in missing_prev_events.clone() {
if self.services.timeline.pdu_exists(&pid).await {
trace!("Found prev event {pid} for outlier event {event_id} locally");
missing_prev_events.remove(&pid);
} else {
// Get json failed, so this was not fetched over federation
graph.insert(prev_event_id.clone(), HashSet::new());
debug_info!(
"Could not find prev event {pid} for outlier event {event_id} locally, \
will fetch over federation"
);
}
},
| _ => {
// Fetch and handle failed
graph.insert(prev_event_id.clone(), HashSet::new());
},
}
if !missing_prev_events.is_empty() {
debug_info!(
"Fetching {} missing prev events for outlier event {event_id}",
missing_prev_events.len()
);
let backfilled = self
.backfill_missing_events(
room_id.to_owned(),
vec![event_id.clone()],
origin.to_owned(),
)
.await?;
debug_info!("Fetched {} missing events for {event_id}", backfilled.len());
let mapped = backfilled
.iter()
.map(|(eid, evt)| {
let mut obj = evt.to_canonical_object();
obj.remove("event_id"); // event_id is inserted by backfill_missing_events
(eid.clone(), obj)
})
.collect::<HashMap<_, _>>();
let local_dag = if mapped.len() == 1 {
mapped.keys().map(ToOwned::to_owned).collect()
} else {
build_local_dag(&mapped).await?
};
debug_info!("Preparing to handle {} missing events", backfilled.len());
for prev_event_id in local_dag {
let obj = mapped
.get(&prev_event_id)
.expect("We should have this event in memory");
debug_info!("Handling prev event {prev_event_id}");
match self
.handle_outlier_pdu(
origin,
create_event,
&prev_event_id,
room_id,
obj.clone(),
false,
)
.await
{
| Ok(_) => {
debug!("Successfully handled {prev_event_id} as an outlier");
missing_prev_events.remove(&prev_event_id);
},
| Err(e) =>
error!(error=?e, %prev_event_id, %event_id, "Failed to handle prev event"),
}
debug_info!("Finished handling prev");
}
}
let outlier = self.services.timeline.get_pdu(&event_id).await;
if missing_prev_events.is_empty()
&& let Ok(pdu) = outlier
{
// promote any prevs first
for prev_event_id in pdu.prev_events() {
debug_info!("Promoting prev event {prev_event_id} to timeline");
let prev_pdu = self.services.timeline.get_pdu(&event_id).await?;
let val = prev_pdu.to_canonical_object();
self.upgrade_outlier_to_timeline_pdu(
prev_pdu,
val,
create_event,
origin,
room_id,
)
.await?;
debug_info!("Finished prev promoting {prev_event_id} to timeline");
}
debug_info!("Promoting event {event_id} to timeline");
let val = pdu.to_canonical_object();
self.upgrade_outlier_to_timeline_pdu(pdu, val, create_event, origin, room_id)
.await?;
debug_info!("Finished promoting {event_id} to timeline");
} else {
debug!(?missing_prev_events, ok=%outlier.is_ok(), "Not promoting {event_id}");
}
}
Ok(())
}
let event_fetch = |event_id| {
let origin_server_ts = eventid_info
.get(&event_id)
.map_or_else(|| uint!(0), |info| info.0.origin_server_ts().get());
// This return value is the key used for sorting events,
// events are then sorted by power level, time,
// and lexically by event_id.
future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts)))
};
let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch)
.await
.map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?;
Ok((sorted, eventid_info))
}
@@ -1,7 +1,6 @@
use std::collections::{HashMap, hash_map};
use conduwuit::{Err, Event, Result, debug, debug_warn, err, implement};
use futures::FutureExt;
use ruma::{
EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids,
events::StateEventType,
@@ -42,7 +41,6 @@ pub(super) async fn fetch_state<Pdu>(
let state_ids = res.pdu_ids.iter().map(AsRef::as_ref);
let state_vec = self
.fetch_and_handle_outliers(origin, state_ids, create_event, room_id)
.boxed()
.await;
let mut state: HashMap<ShortStateKey, OwnedEventId> = HashMap::with_capacity(state_vec.len());
@@ -1,14 +1,11 @@
use std::{
collections::{BTreeMap, hash_map},
time::Instant,
};
use std::{collections::BTreeMap, time::Instant};
use conduwuit::{
Err, Event, PduEvent, Result, debug::INFO_SPAN_LEVEL, debug_error, debug_info, defer, err,
implement, info, trace, utils::stream::IterStream, warn,
implement, info, trace, warn,
};
use futures::{
FutureExt, TryFutureExt, TryStreamExt,
FutureExt,
future::{OptionFuture, try_join4},
};
use ruma::{
@@ -215,72 +212,6 @@ pub async fn handle_incoming_pdu<'a>(
.get_room_create_event(room_id)
.await;
let (incoming_pdu, val) = self
.handle_outlier_pdu(origin, create_event, event_id, room_id, value, false)
.await?;
// 8. if not timeline event: stop
if !is_timeline_event {
return Ok(None);
}
// Skip old events
let first_ts_in_room = self
.services
.timeline
.first_pdu_in_room(room_id)
.await?
.origin_server_ts();
// 9. Fetch any missing prev events doing all checks listed here starting at 1.
// These are timeline events
let (sorted_prev_events, mut eventid_info) = self
.fetch_prev(origin, create_event, room_id, first_ts_in_room, incoming_pdu.prev_events())
.await?;
debug!(
events = ?sorted_prev_events,
"Handling previous events"
);
sorted_prev_events
.iter()
.try_stream()
.map_ok(AsRef::as_ref)
.try_for_each(|prev_id| {
self.handle_prev_pdu(
origin,
event_id,
room_id,
eventid_info.remove(prev_id),
create_event,
first_ts_in_room,
prev_id,
)
.inspect_err(move |e| {
warn!("Prev {prev_id} failed: {e}");
match self
.services
.globals
.bad_event_ratelimiter
.write()
.entry(prev_id.into())
{
| hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
| hash_map::Entry::Occupied(mut e) => {
let tries = e.get().1.saturating_add(1);
*e.get_mut() = (Instant::now(), tries);
},
}
})
.map(|_| self.services.server.check_running())
})
.boxed()
.await?;
// Done with prev events, now handling the incoming event
let start_time = Instant::now();
self.federation_handletime
.write()
@@ -292,7 +223,31 @@ pub async fn handle_incoming_pdu<'a>(
.remove(room_id);
}};
let (incoming_pdu, val) = self
.handle_outlier_pdu(origin, create_event, event_id, room_id, value, false)
.await?;
// 8. if not timeline event: stop
if !is_timeline_event {
return Ok(None);
}
// Skip old events
// let first_ts_in_room = self
// .services
// .timeline
// .first_pdu_in_room(room_id)
// .await?
// .origin_server_ts();
// 9. Fetch any missing prev events doing all checks listed here starting at 1.
// These are timeline events
debug!("Handling previous events");
self.fetch_prevs(room_id, create_event, &incoming_pdu, origin)
.await?;
// Done with prev events, now handling the incoming event
self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id)
.boxed()
.await
}
@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, HashMap, hash_map};
use std::collections::{BTreeMap, HashMap, HashSet, hash_map};
use conduwuit::{
Err, Event, PduEvent, Result, debug, debug_info, debug_warn, err, implement, state_res,
@@ -10,7 +10,7 @@
events::StateEventType,
};
use super::{check_room_id, get_room_version_rules};
use super::{build_local_dag, check_room_id, get_room_version_rules};
use crate::rooms::timeline::pdu_fits;
#[implement(super::Service)]
@@ -22,7 +22,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
event_id: &'a EventId,
room_id: &'a RoomId,
mut value: CanonicalJsonObject,
auth_events_known: bool,
_auth_events_known: bool,
) -> Result<(PduEvent, BTreeMap<String, CanonicalJsonValue>)>
where
Pdu: Event + Send + Sync,
@@ -100,42 +100,71 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
}
// Fetch any missing ones & reject invalid ones
let missing_auth_events = if auth_events_known {
pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>()
} else {
pdu_event.auth_events().collect::<Vec<_>>()
};
if !missing_auth_events.is_empty() || !auth_events_known {
let mut missing_auth_events: HashSet<OwnedEventId> = pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.map(ToOwned::to_owned)
.collect();
if !missing_auth_events.is_empty() {
debug_info!(
"Fetching {} missing auth events for outlier event {event_id}",
missing_auth_events.len()
);
for (pdu, _) in self
.fetch_and_handle_outliers(
origin,
missing_auth_events.iter().copied(),
create_event,
room_id,
let backfilled = self
.backfill_missing_events(
room_id.to_owned(),
vec![event_id.to_owned()],
origin.to_owned(),
)
.await
{
auth_events.insert(pdu.event_id().to_owned(), pdu);
.await?;
debug_info!("Fetched {} missing auth events for {event_id}", backfilled.len());
let mapped = backfilled
.iter()
.map(|(eid, evt)| {
let mut obj = evt.to_canonical_object();
obj.remove("event_id"); // event_id is inserted by backfill_missing_events
(eid.clone(), obj)
})
.collect::<HashMap<_, _>>();
let local_dag = if mapped.len() == 1 {
mapped.keys().map(ToOwned::to_owned).collect()
} else {
build_local_dag(&mapped).await?
};
debug_info!("Preparing to handle {} missing auth events", backfilled.len());
for prev_event_id in local_dag {
let obj = mapped
.get(&prev_event_id)
.expect("We should have this event in memory");
debug_info!("Handling prev {prev_event_id}");
let (prev, _) = Box::pin(self.handle_outlier_pdu(
origin,
create_event,
&prev_event_id,
room_id,
obj.clone(),
false,
))
.await?;
if missing_auth_events.contains(&*prev_event_id) {
missing_auth_events.remove(&prev_event_id);
auth_events.insert(prev_event_id, prev);
}
debug_info!("Finished handling prev auth event");
}
} else {
debug!("No missing auth events for outlier event {event_id}");
}
// reject if we are still missing some
let still_missing = pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>();
if !still_missing.is_empty() {
// reject if we are still missing some auth events.
// If we're still missing prev events, we will fetch them individually later,
// but there's no reason for us to be missing auth events now we've gapfilled
// the DAG.
if !missing_auth_events.is_empty() {
// Don't reject: this could be a temporary condition
return Err!(Request(InvalidParam(
"Could not fetch all auth events for outlier event {event_id}, still missing: \
{still_missing:?}"
{missing_auth_events:?}"
)));
}
@@ -163,6 +192,10 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
v.insert(auth_event);
},
| hash_map::Entry::Occupied(_) => {
self.services
.outlier
.add_pdu_outlier(pdu_event.event_id(), &incoming_pdu);
self.services.pdu_metadata.mark_event_rejected(event_id);
return Err!(Request(InvalidParam(
"Auth event's type and state_key combination exists multiple times: {}, {}",
auth_event.kind,
@@ -177,6 +210,10 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
auth_events_by_key.get(&(StateEventType::RoomCreate, String::new().into())),
Some(_) | None
) {
self.services.pdu_metadata.mark_event_rejected(event_id);
self.services
.outlier
.add_pdu_outlier(pdu_event.event_id(), &incoming_pdu);
return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
}
@@ -185,6 +222,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
ready(auth_events_by_key.get(&key).map(ToOwned::to_owned))
};
// PDU check: 3
let auth_check = state_res::event_auth::auth_check(
&room_version_rules,
&pdu_event,
@@ -196,7 +234,13 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
if !auth_check {
return Err!(Request(Forbidden("Auth check failed")));
self.services.pdu_metadata.mark_event_rejected(event_id);
self.services
.outlier
.add_pdu_outlier(pdu_event.event_id(), &incoming_pdu);
return Err!(Request(Forbidden(
"Event authorisation fails based on event's claimed auth events"
)));
}
trace!("Validation successful.");
@@ -1,89 +0,0 @@
use std::{collections::BTreeMap, time::Instant};
use conduwuit::{
Err, Event, PduEvent, Result, debug::INFO_SPAN_LEVEL, defer, implement,
utils::continue_exponential_backoff_secs,
};
use ruma::{CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName};
use tracing::debug;
#[implement(super::Service)]
#[allow(clippy::type_complexity)]
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(
name = "prev",
level = INFO_SPAN_LEVEL,
skip_all,
fields(%prev_id),
)]
pub(super) async fn handle_prev_pdu<'a, Pdu>(
&self,
origin: &'a ServerName,
event_id: &'a EventId,
room_id: &'a RoomId,
eventid_info: Option<(PduEvent, BTreeMap<String, CanonicalJsonValue>)>,
create_event: &'a Pdu,
first_ts_in_room: MilliSecondsSinceUnixEpoch,
prev_id: &'a EventId,
) -> Result
where
Pdu: Event + Send + Sync,
{
// Check for disabled again because it might have changed
if self.services.metadata.is_disabled(room_id).await {
return Err!(Request(Forbidden(debug_warn!(
"Federaton of room {room_id} is currently disabled on this server. Request by \
origin {origin} and event ID {event_id}"
))));
}
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(prev_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) {
debug!(
?tries,
duration = ?time.elapsed(),
"Backing off from prev_event"
);
return Ok(());
}
}
let Some((pdu, json)) = eventid_info else {
return Ok(());
};
// Skip old events
if pdu.origin_server_ts() < first_ts_in_room {
return Ok(());
}
let start_time = Instant::now();
self.federation_handletime
.write()
.insert(room_id.into(), ((*prev_id).to_owned(), start_time));
defer! {{
self.federation_handletime
.write()
.remove(room_id);
}};
self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id)
.await?;
debug!(
elapsed = ?start_time.elapsed(),
"Handled prev_event",
);
Ok(())
}
+1 -2
View File
@@ -4,7 +4,6 @@
mod fetch_state;
mod handle_incoming_pdu;
mod handle_outlier_pdu;
mod handle_prev_pdu;
mod parse_incoming_pdu;
mod policy_server;
mod resolve_state;
@@ -15,13 +14,13 @@
use async_trait::async_trait;
use conduwuit::{Err, Event, PduEvent, Result, Server, SyncRwLock, utils::MutexMap};
pub use fetch_and_handle_outliers::build_local_dag;
use ruma::{
OwnedEventId, OwnedRoomId, RoomId, events::room::create::RoomCreateEventContent,
room_version_rules::RoomVersionRules,
};
use crate::{Dep, globals, rooms, sending, server_keys};
pub struct Service {
pub mutex_federation: RoomMutexMap,
pub federation_handletime: SyncRwLock<HandleTimeMap>,
@@ -56,7 +56,10 @@ fn extract_room_id(event_type: &str, pdu: &CanonicalJsonObject) -> Result<OwnedR
/// Parses every entry in an array as an event ID, returning an error if any
/// step fails.
fn expect_event_id_array(value: &CanonicalJsonObject, field: &str) -> Result<Vec<OwnedEventId>> {
pub(super) fn expect_event_id_array(
value: &CanonicalJsonObject,
field: &str,
) -> Result<Vec<OwnedEventId>> {
value
.get(field)
.ok_or_else(|| err!(Request(BadJson("missing field `{field}` on PDU"))))?
@@ -5,7 +5,7 @@
};
use conduwuit::{
Result, debug, err, implement,
Result, debug, err, error, implement,
matrix::{Event, StateMap},
trace,
utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt},
@@ -121,6 +121,7 @@ pub(super) async fn state_at_incoming_resolved<Pdu>(
.state_resolution(room_version_rules, fork_states.iter(), &auth_chain_sets)
.boxed()
.await
.inspect_err(|e| error!("State resolution failed: {e:?}"))
else {
return Ok(None);
};
@@ -1,14 +1,18 @@
use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant};
use std::{borrow::Borrow, collections::BTreeMap, sync::Arc, time::Instant};
use conduwuit::{
Err, Result, debug, debug_info, err, implement, info, is_equal_to,
Err, Result, debug, debug_info, debug_warn, err, implement, is_equal_to,
matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
trace,
utils::stream::{BroadbandExt, ReadyExt},
utils::{
IterStream,
stream::{BroadbandExt, ReadyExt},
},
warn,
};
use futures::{FutureExt, StreamExt, future::ready};
use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
use tokio::join;
use super::get_room_version_rules;
use crate::rooms::{
@@ -35,16 +39,37 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
.get_pdu_id(incoming_pdu.event_id())
.await
{
trace!(event_id=%incoming_pdu.event_id(), "Skipping upgrade of already upgraded PDU");
return Ok(Some(pduid));
}
if self
.services
.pdu_metadata
.is_event_soft_failed(incoming_pdu.event_id())
.await
{
return Err!(Request(InvalidParam("Event has been soft failed")));
let (rejected, soft_failed) = join!(
self.services
.pdu_metadata
.is_event_rejected(incoming_pdu.event_id()),
self.services
.pdu_metadata
.is_event_soft_failed(incoming_pdu.event_id())
);
if rejected {
return Err!(Request(InvalidParam("Event has been rejected")));
} else if soft_failed {
return Err!(Request(InvalidParam("Event has been soft-failed")));
}
// If any of the auth events are rejected, this event is also rejected.
for aid in incoming_pdu.auth_events() {
if self.services.pdu_metadata.is_event_rejected(aid).await {
// TODO: debug_warn instead of warn
warn!(
"Rejecting incoming event {} which depends on rejected auth event {aid}",
incoming_pdu.event_id()
);
self.services
.pdu_metadata
.mark_event_rejected(incoming_pdu.event_id());
return Err!(Request(InvalidParam("Event has rejected auth event: {aid}")));
}
}
debug!(
@@ -70,6 +95,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
};
if state_at_incoming_event.is_none() {
trace!("Could not calculate incoming state, asking remote {origin} for it");
state_at_incoming_event = self
.fetch_state(origin, create_event, room_id, incoming_pdu.event_id())
.await?;
@@ -95,6 +121,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
event_id = %incoming_pdu.event_id,
"Running initial auth check"
);
// PDU check: 5
let auth_check = state_res::event_auth::auth_check(
&room_version_rules,
&incoming_pdu,
@@ -106,7 +133,12 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
if !auth_check {
return Err!(Request(Forbidden("Event has failed auth check with state at the event.")));
self.services
.pdu_metadata
.mark_event_rejected(incoming_pdu.event_id());
return Err!(Request(Forbidden(
"Event authorisation fails based on the state before the event"
)));
}
debug!(
@@ -135,6 +167,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
event_id = %incoming_pdu.event_id,
"Running auth check with claimed state auth"
);
// PDU check: 6
let auth_check = state_res::event_auth::auth_check(
&room_version_rules,
&incoming_pdu,
@@ -144,6 +177,12 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
if !auth_check {
warn!(
event_id = %incoming_pdu.event_id,
"Event authentication fails based on the current state of the room"
);
}
// Soft fail check before doing state res
debug!(
@@ -153,16 +192,22 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
let mut soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_rules)) {
| (false, _) => true,
| (true, None) => false,
| (true, Some(redact_id)) =>
!self
| (true, Some(redact_id)) => {
if !self
.services
.state_accessor
.user_can_redact(&redact_id, incoming_pdu.sender(), room_id, true)
.await?,
.await?
{
warn!(redacts = %redact_id, "User is not allowed to redact event");
true
} else {
false
}
},
};
// 13. Use state resolution to find new room state
// We start looking at current room state now, so lets lock the room
trace!(
room_id = %room_id,
@@ -170,36 +215,6 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
);
let state_lock = self.services.state.mutex.lock(room_id).await;
// Now we calculate the set of extremities this room has after the incoming
// event has been applied. We start with the previous extremities (aka leaves)
trace!("Calculating extremities");
let mut extremities: Vec<_> = self
.services
.state
.get_forward_extremities(room_id)
.ready_filter(|event_id| {
// Remove any that are referenced by this incoming event's prev_events
!incoming_pdu.prev_events().any(is_equal_to!(event_id))
})
.broad_filter_map(|event_id| async move {
// Only keep those extremities were not referenced yet
self.services
.pdu_metadata
.is_event_referenced(room_id, &event_id)
.await
.eq(&false)
.then_some(event_id)
})
.collect()
.await;
extremities.push(incoming_pdu.event_id().to_owned());
debug!(
"Retained {} extremities checked against {} prev_events",
extremities.len(),
incoming_pdu.prev_events().count()
);
let state_ids_compressed: Arc<CompressedState> = self
.services
.state_compressor
@@ -294,60 +309,57 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
.is_event_soft_failed(&redact_id)
.await
{
// TODO: This should avoid pushing the event to the timeline instead of using
// soft-fails as a hack
warn!(
redact_id = %redact_id,
"Redaction is for a soft-failed event, soft failing the redaction"
"Redaction is for a soft-failed event"
);
soft_fail = true;
}
}
}
// 14. Check if the event passes auth based on the "current state" of the room,
// if not soft fail it
if soft_fail {
info!(
event_id = %incoming_pdu.event_id,
"Soft failing event"
);
// assert!(extremities.is_empty(), "soft_fail extremities empty");
let extremities = extremities.iter().map(Borrow::borrow);
debug_assert!(extremities.clone().count() > 0, "extremities not empty");
self.services
.timeline
.append_incoming_pdu(
&incoming_pdu,
val,
extremities,
state_ids_compressed,
soft_fail,
&state_lock,
room_id,
)
.await?;
// Soft fail, we keep the event as an outlier but don't add it to the timeline
self.services
.pdu_metadata
.mark_event_soft_failed(incoming_pdu.event_id());
warn!(
event_id = %incoming_pdu.event_id,
"Event was soft failed"
);
return Err!(Request(InvalidParam("Event has been soft failed")));
}
// Now that the event has passed all auth it is added into the timeline.
// We use the `state_at_event` instead of `state_after` so we accurately
// represent the state for this event.
trace!("Appending pdu to timeline");
let extremities = extremities
.iter()
.map(Borrow::borrow)
.chain(once(incoming_pdu.event_id()));
debug_assert!(extremities.clone().count() > 0, "extremities not empty");
let mut extremities: Vec<_> = self
.services
.state
.get_forward_extremities(room_id)
.collect()
.await;
if !soft_fail {
// Per https://spec.matrix.org/unstable/server-server-api/#soft-failure, soft-failed events
// are not added as forward extremities.
// Now we calculate the set of extremities this room has after the incoming
// event has been applied. We start with the previous extremities (aka leaves)
trace!("Calculating extremities");
extremities = extremities
.into_iter()
.stream()
.ready_filter(|event_id| {
// Remove any that are referenced by this incoming event's prev_events
!incoming_pdu.prev_events().any(is_equal_to!(event_id))
})
.broad_filter_map(|event_id| async move {
// Only keep those extremities were not referenced yet
self.services
.pdu_metadata
.is_event_referenced(room_id, &event_id)
.await
.eq(&false)
.then_some(event_id)
})
.collect::<Vec<_>>()
.await;
extremities.push(incoming_pdu.event_id().to_owned());
debug!(
"Retained {} extremities checked against {} prev_events",
extremities.len(),
incoming_pdu.prev_events().count()
);
assert!(!extremities.is_empty(), "extremities must not empty");
}
let pdu_id = self
.services
@@ -355,20 +367,30 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
.append_incoming_pdu(
&incoming_pdu,
val,
extremities,
extremities.iter().map(Borrow::borrow),
state_ids_compressed,
soft_fail,
&state_lock,
room_id,
)
.await?;
if soft_fail {
self.services
.pdu_metadata
.mark_event_soft_failed(incoming_pdu.event_id());
debug_warn!(
elapsed = ?timer.elapsed(),
"Event has been soft-failed",
);
} else {
debug_info!(
elapsed = ?timer.elapsed(),
"Accepted",
);
}
// Event has passed all auth/stateres checks
drop(state_lock);
debug_info!(
elapsed = ?timer.elapsed(),
"Accepted",
);
Ok(pdu_id)
}
+9 -2
View File
@@ -34,7 +34,7 @@
use crate::{
Dep, antispam, globals,
rooms::{
metadata, outlier, short,
metadata, outlier, pdu_metadata, short,
state::{self, RoomMutexGuard},
state_accessor, state_cache,
state_compressor::{self, CompressedState, HashSetCompressStateEvent},
@@ -54,6 +54,7 @@ struct Services {
globals: Dep<globals::Service>,
metadata: Dep<metadata::Service>,
outlier: Dep<outlier::Service>,
pdu_metadata: Dep<pdu_metadata::Service>,
sending: Dep<sending::Service>,
server_keys: Dep<server_keys::Service>,
short: Dep<short::Service>,
@@ -75,6 +76,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
globals: args.depend::<globals::Service>("globals"),
metadata: args.depend::<metadata::Service>("metadata"),
outlier: args.depend::<outlier::Service>("rooms::outlier"),
pdu_metadata: args.depend::<pdu_metadata::Service>("rooms::pdu_metadata"),
sending: args.depend::<sending::Service>("sending"),
server_keys: args.depend::<server_keys::Service>("server_keys"),
short: args.depend::<short::Service>("rooms::short"),
@@ -240,6 +242,7 @@ async fn join_local_room(
let mut content = RoomMemberEventContent::new(MembershipState::Join);
content.displayname = self.services.users.displayname(sender_user).await.ok();
content.avatar_url = self.services.users.avatar_url(sender_user).await.ok();
content.blurhash = self.services.users.blurhash(sender_user).await.ok();
content.reason.clone_from(&reason);
content.join_authorized_via_users_server = auth_user;
@@ -285,7 +288,7 @@ async fn join_local_room(
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote_room", level = "info")]
async fn join_remote_room(
pub async fn join_remote_room(
&self,
sender_user: &UserId,
room_id: &RoomId,
@@ -293,6 +296,7 @@ async fn join_remote_room(
servers: &[OwnedServerName],
state_lock: RoomMutexGuard,
) -> Result {
// public so the admin command force-join-room-remotely works
info!("Joining {room_id} over federation.");
let (make_join_response, remote_server) = self
@@ -350,6 +354,7 @@ async fn join_remote_room(
let mut join_content = RoomMemberEventContent::new(MembershipState::Join);
join_content.displayname = self.services.users.displayname(sender_user).await.ok();
join_content.avatar_url = self.services.users.avatar_url(sender_user).await.ok();
join_content.blurhash = self.services.users.blurhash(sender_user).await.ok();
join_content.reason = reason;
join_content
.join_authorized_via_users_server
@@ -512,6 +517,7 @@ async fn join_remote_room(
return state;
}
self.services.outlier.add_pdu_outlier(&event_id, &value);
self.services.pdu_metadata.clear_pdu_markers(&event_id);
if let Some(state_key) = &pdu.state_key {
let shortstatekey = self
.services
@@ -543,6 +549,7 @@ async fn join_remote_room(
.ready_for_each(|(event_id, value)| {
trace!(%event_id, "Adding PDU as an outlier from send_join auth_chain");
self.services.outlier.add_pdu_outlier(&event_id, &value);
self.services.pdu_metadata.clear_pdu_markers(&event_id);
})
.await;
+24
View File
@@ -26,6 +26,7 @@ pub(super) struct Data {
tofrom_relation: Arc<Map>,
referencedevents: Arc<Map>,
softfailedeventids: Arc<Map>,
rejectedeventids: Arc<Map>,
services: Services,
}
@@ -40,6 +41,7 @@ pub(super) fn new(args: &crate::Args<'_>) -> Self {
tofrom_relation: db["tofrom_relation"].clone(),
referencedevents: db["referencedevents"].clone(),
softfailedeventids: db["softfailedeventids"].clone(),
rejectedeventids: db["rejectedeventids"].clone(),
services: Services {
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
@@ -118,7 +120,29 @@ pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) {
self.softfailedeventids.insert(event_id, []);
}
pub(super) fn unmark_event_soft_failed(&self, event_id: &EventId) {
self.softfailedeventids.remove(event_id);
}
pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool {
self.softfailedeventids.get(event_id).await.is_ok()
}
pub(super) fn mark_event_rejected(&self, event_id: &EventId) {
self.rejectedeventids.insert(event_id, []);
}
pub(super) fn unmark_event_rejected(&self, event_id: &EventId) {
self.rejectedeventids.remove(event_id);
}
pub(super) async fn is_event_rejected(&self, event_id: &EventId) -> bool {
self.rejectedeventids.get(event_id).await.is_ok()
}
/// Removes any soft-fail or rejection markers applied to the target PDU
pub(super) fn clear_pdu_markers(&self, event_id: &EventId) {
self.unmark_event_rejected(event_id);
self.unmark_event_soft_failed(event_id);
}
}
+24
View File
@@ -140,4 +140,28 @@ pub fn mark_event_soft_failed(&self, event_id: &EventId) {
pub async fn is_event_soft_failed(&self, event_id: &EventId) -> bool {
self.db.is_event_soft_failed(event_id).await
}
pub async fn is_event_rejected(&self, event_id: &EventId) -> bool {
self.db.is_event_rejected(event_id).await
}
pub fn mark_event_rejected(&self, event_id: &EventId) {
self.db.mark_event_rejected(event_id);
}
pub fn unmark_event_soft_failed(&self, event_id: &EventId) {
self.db.unmark_event_soft_failed(event_id);
}
pub fn unmark_event_rejected(&self, event_id: &EventId) {
self.db.unmark_event_rejected(event_id);
}
/// Returns true if the event is neither soft-failed nor rejected.
pub async fn is_event_accepted(&self, event_id: &EventId) -> bool {
!self.db.is_event_rejected(event_id).await
&& !self.db.is_event_soft_failed(event_id).await
}
pub fn clear_pdu_markers(&self, event_id: &EventId) { self.db.clear_pdu_markers(event_id); }
}
+1 -9
View File
@@ -53,15 +53,7 @@ pub async fn append_incoming_pdu<'a, Leaves>(
.await?;
if soft_fail {
self.services
.pdu_metadata
.mark_as_referenced(room_id, pdu.prev_events.iter().map(AsRef::as_ref));
// self.services
// .state
// .set_forward_extremities(room_id, new_room_leaves, state_lock)
// .await;
// Nothing else to do with a soft-failed event.
return Ok(None);
}
+17
View File
@@ -79,6 +79,7 @@ struct Data {
userdeviceid_token: Arc<Map>,
userfilterid_filter: Arc<Map>,
userid_avatarurl: Arc<Map>,
userid_blurhash: Arc<Map>,
userid_dehydrateddevice: Arc<Map>,
userid_devicelistversion: Arc<Map>,
userid_displayname: Arc<Map>,
@@ -119,6 +120,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
userdeviceid_token: args.db["userdeviceid_token"].clone(),
userfilterid_filter: args.db["userfilterid_filter"].clone(),
userid_avatarurl: args.db["userid_avatarurl"].clone(),
userid_blurhash: args.db["userid_blurhash"].clone(),
userid_dehydrateddevice: args.db["userid_dehydrateddevice"].clone(),
userid_devicelistversion: args.db["userid_devicelistversion"].clone(),
userid_displayname: args.db["userid_displayname"].clone(),
@@ -428,6 +430,20 @@ pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<OwnedMxcUri>)
}
}
/// Get the blurhash of a user.
pub async fn blurhash(&self, user_id: &UserId) -> Result<String> {
self.db.userid_blurhash.get(user_id).await.deserialized()
}
/// Sets a new avatar_url or removes it if avatar_url is None.
pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) {
if let Some(blurhash) = blurhash {
self.db.userid_blurhash.insert(user_id, blurhash);
} else {
self.db.userid_blurhash.remove(user_id);
}
}
/// Adds a new device to a user.
pub async fn create_device(
&self,
@@ -1363,6 +1379,7 @@ pub fn set_profile_key(
pub async fn clear_profile(&self, user_id: &UserId) {
self.set_displayname(user_id, None);
self.set_avatar_url(user_id, None);
self.set_blurhash(user_id, None);
self.all_profile_keys(user_id)
.ready_for_each(|(key, _)| self.set_profile_key(user_id, &key, None))
.await;
+1 -1
View File
@@ -3,7 +3,7 @@
/// A reusable form component with field validation.
#[derive(Debug, Template)]
#[template(path = "_components/form.html.j2")]
#[template(path = "_components/form.html.j2", print = "code")]
pub(crate) struct Form<'a> {
pub inputs: Vec<FormInput<'a>>,
pub validation_errors: Option<ValidationErrors>,