Merge branch 'develop' into madlittlemods/remove-flawed-msc4311-partial-implementation

This commit is contained in:
Eric Eastwood
2026-05-01 18:23:10 -05:00
55 changed files with 2055 additions and 383 deletions
+1
View File
@@ -7,6 +7,7 @@ updates:
package-ecosystem: "pip"
directory: "/"
open-pull-requests-limit: 10
versioning-strategy: "increase-if-necessary"
schedule:
interval: "weekly"
# Group patch updates to packages together into a single PR, as they rarely
+9 -9
View File
@@ -41,13 +41,13 @@ jobs:
echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
- name: Log in to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -79,7 +79,7 @@ jobs:
services/backend-repositories/secret/data/oci.element.io password | OCI_PASSWORD ;
- name: Login to Element OCI Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
registry: oci-push.vpn.infra.element.io
username: ${{ steps.import-secrets.outputs.OCI_USERNAME }}
@@ -87,7 +87,7 @@ jobs:
- name: Build and push by digest
id: build
uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
push: true
labels: |
@@ -108,7 +108,7 @@ jobs:
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: digests-${{ matrix.suffix }}
path: ${{ runner.temp }}/digests/*
@@ -136,14 +136,14 @@ jobs:
merge-multiple: true
- name: Log in to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
if: ${{ startsWith(matrix.repository, 'docker.io') }}
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
if: ${{ startsWith(matrix.repository, 'ghcr.io') }}
with:
registry: ghcr.io
@@ -176,7 +176,7 @@ jobs:
services/backend-repositories/secret/data/oci.element.io password | OCI_PASSWORD ;
- name: Login to Element OCI Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
registry: oci-push.vpn.infra.element.io
username: ${{ steps.import-secrets.outputs.OCI_USERNAME }}
@@ -186,7 +186,7 @@ jobs:
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
- name: Calculate docker image tag
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
+1 -1
View File
@@ -39,7 +39,7 @@ jobs:
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: book
path: book
+1 -1
View File
@@ -172,7 +172,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+1 -1
View File
@@ -52,7 +52,7 @@ jobs:
with:
poetry-version: "2.2.1"
- name: Login to registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
registry: ghcr.io
username: ${{ github.actor }}
+4 -4
View File
@@ -64,7 +64,7 @@ jobs:
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Set up docker layer caching
uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -99,7 +99,7 @@ jobs:
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*
@@ -150,7 +150,7 @@ jobs:
# musl: (TODO: investigate).
CIBW_TEST_SKIP: pp3*-* *musl*
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
- uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: Wheel-${{ matrix.os }}
path: ./wheelhouse/*.whl
@@ -171,7 +171,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
- uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: Sdist
path: dist/*.tar.gz
+3 -3
View File
@@ -174,7 +174,7 @@ jobs:
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
with:
path: |
.mypy_cache
@@ -561,7 +561,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -658,7 +658,7 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
+1 -1
View File
@@ -145,7 +145,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+8 -4
View File
@@ -1,20 +1,24 @@
# Synapse 1.152.0rc1 (2026-04-22)
# Synapse 1.152.0 (2026-04-28)
No significant changes since 1.152.0rc1.
## Configuration changes needed for deployments using workers
For deployments using workers, please note that this version introduces a new `quarantined_media_changes` stream writer, which may require configuration changes.
Please see the [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11520) for details.
Without configuring this new stream writer, only the main process will be able to handle the `/media/quarantine` admin API endpoints for quarantining media.
# Synapse 1.152.0rc1 (2026-04-22)
## Features
- Add a ["Listing quarantined media changes" Admin API](https://element-hq.github.io/synapse/latest/admin_api/media_admin_api.html#listing-quarantined-media-changes) for retrieving a paginated record of when media became (un)quarantined. ([\#19558](https://github.com/element-hq/synapse/issues/19558), [\#19677](https://github.com/element-hq/synapse/issues/19677))
- Add a ["Listing quarantined media changes" Admin API](https://element-hq.github.io/synapse/latest/admin_api/media_admin_api.html#listing-quarantined-media-changes) for retrieving a paginated record of when media became (un)quarantined. ([\#19558](https://github.com/element-hq/synapse/issues/19558), [\#19677](https://github.com/element-hq/synapse/issues/19677), [\#19694](https://github.com/element-hq/synapse/issues/19694))
- Advertise [MSC4445](https://github.com/matrix-org/matrix-spec-proposals/pull/4445) sync timeline order in `unstable_features`. ([\#19642](https://github.com/element-hq/synapse/issues/19642))
- Report the Rust compiler version used in the Prometheus metrics. Contributed by Noah Markert. ([\#19643](https://github.com/element-hq/synapse/issues/19643))
- Passthrough 'article' and 'profile' OpenGraph metadata on URL preview requests. ([\#19659](https://github.com/element-hq/synapse/issues/19659))
- Add a way to re-sign local events with a new signing key. ([\#19668](https://github.com/element-hq/synapse/issues/19668))
- Support [MSC4450: Identity Provider selection for User-Interactive Authentication with Legacy Single Sign-On](https://github.com/matrix-org/matrix-spec-proposals/pull/4450). ([\#19693](https://github.com/element-hq/synapse/issues/19693))
- Add a ["Listing quarantined media changes" Admin API](https://element-hq.github.io/synapse/latest/admin_api/media_admin_api.html#listing-quarantined-media-changes) for retrieving a paginated record of when media became (un)quarantined. ([\#19694](https://github.com/element-hq/synapse/issues/19694))
- Add experimental support for [MSC4242](https://github.com/matrix-org/matrix-spec-proposals/pull/4242): State DAGs. Excludes federation support. ([\#19424](https://github.com/element-hq/synapse/issues/19424))
- Add [Admin API](https://element-hq.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoints to list, fetch, and delete user reports. ([\#19657](https://github.com/element-hq/synapse/issues/19657))
@@ -30,7 +34,7 @@ Without configuring this new stream writer, only the main process will be able t
- Include a workaround for running the unit tests with SQLite under recent versions of MacOS. ([\#19615](https://github.com/element-hq/synapse/issues/19615))
- Fix Docker image link typo in worker docs. ([\#19645](https://github.com/element-hq/synapse/issues/19645))
- Update developer stream docs for creating a new stream to point out `_setup_sequence(...)` in `portdb`. ([\#19675](https://github.com/element-hq/synapse/issues/19675))
- Update the developer stream docs for creating a new stream to point out `_setup_sequence(...)` in `portdb`. ([\#19675](https://github.com/element-hq/synapse/issues/19675))
- Update the developer stream docs for creating a new stream to highlight places that require documentation updates. ([\#19696](https://github.com/element-hq/synapse/issues/19696))
## Internal Changes
Generated
+18 -2
View File
@@ -164,6 +164,12 @@ dependencies = [
"syn",
]
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -662,6 +668,15 @@ dependencies = [
"serde",
]
[[package]]
name = "itertools"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
@@ -1125,9 +1140,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
version = "0.103.10"
version = "0.103.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e"
dependencies = [
"ring",
"rustls-pki-types",
@@ -1336,6 +1351,7 @@ dependencies = [
"http",
"http-body-util",
"icu_segmenter",
"itertools",
"lazy_static",
"log",
"mime",
+1
View File
@@ -0,0 +1 @@
Make ACLs apply to EDUs per [MSC4163](https://github.com/matrix-org/matrix-spec-proposals/pull/4163).
+1
View File
@@ -0,0 +1 @@
Allow user-requested erasure to succeed even if Synapse has disabled profile changes. Contributed by Famedly.
+1
View File
@@ -0,0 +1 @@
Fix Synapse not backfilling new history when attempting to use a pagination token near a backward extremity.
+1
View File
@@ -0,0 +1 @@
Add warning about known problems when configuring `use_frozen_dicts`.
+1
View File
@@ -0,0 +1 @@
Have SSS return a new response immediately if a room subscription has changed and produced a new response.
+1
View File
@@ -0,0 +1 @@
Stabilize MSC3266, removing the experimental config flag `msc3266_enabled`. Add support for stable room summary endpoints. Contributed by @dasha-uwu.
+1
View File
@@ -0,0 +1 @@
Partial [MSC4311](https://github.com/matrix-org/matrix-spec-proposals/pull/4311) implementation: `m.room.create` is now a required part of stripped `invite_state`/`knock_state`. Contributed by @FrenchGithubUser and @Famedly.
+1
View File
@@ -0,0 +1 @@
Fix a bug where, when upgrading a room to v12, the power level event in the old room was mutated to remove the power of the user performing the upgrade.
+1
View File
@@ -0,0 +1 @@
Expose `tombstoned` and `replacement_room` in room details on the admin API endpoint `GET /_synapse/admin/v1/rooms/<room_id>`. Contributed by Noah Markert.
+1
View File
@@ -0,0 +1 @@
Add a Rust canonical JSON serializer.
+1
View File
@@ -0,0 +1 @@
Fix packaging for Fedora and EPEL caused by unnecessarily bumping the `authlib` minimum version requirement in the `pyproject.toml` file. Contributed by Oleg Girko.
+1
View File
@@ -0,0 +1 @@
Configure Dependabot to only update Python dependencies in the lockfile, unless widening upper bounds.
+6 -5
View File
@@ -7,6 +7,7 @@ toolchain go1.24.4
require (
github.com/matrix-org/complement v0.0.0-20251120181401-44111a2a8a9d
github.com/matrix-org/gomatrixserverlib v0.0.0-20250813150445-9f5070a65744
github.com/tidwall/gjson v1.18.0
)
require (
@@ -22,6 +23,7 @@ require (
require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -40,16 +42,15 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel v1.41.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.41.0 // indirect
go.opentelemetry.io/otel/trace v1.41.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/sys v0.38.0 // indirect
+12 -10
View File
@@ -4,6 +4,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -81,8 +83,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -95,24 +97,24 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+394
View File
@@ -0,0 +1,394 @@
// This file is licensed under the Affero General Public License (AGPL) version 3.
//
// Copyright (C) 2026 Element Creations Ltd
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// See the GNU Affero General Public License for more details:
// <https://www.gnu.org/licenses/agpl-3.0.html>.
package synapse_tests
import (
"encoding/json"
"fmt"
"net/url"
"slices"
"strings"
"testing"
"github.com/matrix-org/complement"
"github.com/matrix-org/complement/b"
"github.com/matrix-org/complement/client"
"github.com/matrix-org/complement/helpers"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/tidwall/gjson"
)
// TestMessagesOverFederation checks that `/messages` pagination over federation
// surfaces history that can only be found by backfilling from backward
// extremities "near" the pagination token (see the long explanation below).
func TestMessagesOverFederation(t *testing.T) {
	// Two homeservers so Bob (hs2) has to learn the room history over federation.
	deployment := complement.Deploy(t, 2)
	defer deployment.Destroy(t)

	alice := deployment.Register(t, "hs1", helpers.RegistrationOpts{
		LocalpartSuffix: "alice",
	})
	bob := deployment.Register(t, "hs2", helpers.RegistrationOpts{
		LocalpartSuffix: "bob",
	})

	// The typical convention to find backfill points is from the backward extremities in
	// the DAG. Backward extremities are the oldest events we know of in the room, but we
	// only know of them because some other event referenced them via prev_events — the
	// events themselves aren't known to the homeserver yet (meaning we don't know their
	// depth specifically). So we can only do approximate depth comparisons (use the
	// depth of the known events they're connected to). And we don't know if those
	// backward extremities point to a long chain/fork of history that could stretch
	// back far enough to be visible.
	//
	// This means a naive homeserver implementation that looks for backward extremities <=
	// depth of the `/messages?dir=b&from=xxx` token may overlook a backfill point that could
	// reveal more history in the window the user is currently paginating in.
	//
	// This could be a near miss, as this test is specifically stressing, or a deeper miss
	// as the backward extremity could reveal an entire fork of history that stretches
	// back far enough to be visible.
	//
	// In Synapse, we consider "nearby" as anything within range of the `limit` specified
	// in `/messages?dir=b&from=xxx&limit=xxx`.
	//
	// This test lives in our in-repo Complement tests for Synapse because the Matrix spec
	// doesn't have any rules for how a homeserver should backfill. Practically speaking,
	// homeservers that don't do anything for this problem will just hide messages from
	// clients. This underscores why it's necessary for homeservers to indicate that
	// there is a gap (using MSC3871) at the very least.
	//
	// --------------------------------------------------
	//
	// Even with MSC3871 gaps, the tested behavior here is necessary as the gap prev/next
	// tokens point before/after the event (remember: tokens are positions between
	// events), so if you use `/messages?dir=b&from=<gap prev_pagination_token>`, we can't
	// rely on naive depth comparison. MSC3871 Complement tests will also exercise this.
	// Example:
	//
	//  t0       t1       t2       t3            t4
	//    [A] <--- [B] <--- [C] <--- [bob join 4]
	//
	// When Bob calls `/messages?dir=b&backfill=false`, he sees a gap (`{ event_id: "bob
	// join 4", prev_pagination_token: "t3", next_pagination_token: "t4" }`) and tries to
	// fill it in with `/messages?dir=b&from=t3&limit=10&backfill=true`. To find backfill
	// points, Synapse will compare `t3` with the backward extremity at an approximate
	// depth of 4. Which is why we take `t3`, add the `limit=10` and then do the
	// comparison (find any backfill points with an approximate depth <= 13).
	t.Run("Backfill from nearby backward extremities past token", func(t *testing.T) {
		// Alice creates the room
		roomID := alice.MustCreateRoom(t, map[string]interface{}{
			// The `public_chat` preset includes `history_visibility: "shared"` ("Previous
			// events are always accessible to newly joined members. All events in the
			// room are accessible, even those sent when the member was not a part of the
			// room."), which is what we want to test.
			"preset": "public_chat",
		})

		// Keep track of the order the events were sent in
		eventIDs := make([]string, 0)
		// Map from event_id to event info
		eventMap := make(map[string]EventInfo)

		// Send some message history into the room (before Bob joins)
		numberOfMessagesToSend := 3
		messageDrafts := make([]MessageDraft, 0, numberOfMessagesToSend)
		for i := 0; i < numberOfMessagesToSend; i++ {
			messageDrafts = append(
				messageDrafts,
				MessageDraft{alice, fmt.Sprintf("message history %d", i+1)},
			)
		}
		sendAndTrackMessages(t, roomID, messageDrafts, &eventIDs, &eventMap)

		// Bob joins the room (via hs1, since hs2 knows nothing about the room yet)
		bob.MustJoinRoom(t, roomID, []spec.ServerName{
			deployment.GetFullyQualifiedHomeserverName(t, "hs1"),
		})
		bobJoinEventID := getStateID(t, bob, roomID, "m.room.member", bob.UserID)

		// Make it easy to cross-reference the events being talked about in the logs
		for eventIndex, eventID := range eventIDs {
			t.Logf("Message %d -> event_id=%s", eventIndex, eventID)
		}

		// Use a `/context` request to get a pagination token just before Bob's join event
		// (remember: tokens are positions between events)
		//
		// Usually a client would just use `/messages?dir=b` to start getting history
		// after joining but this is valid as well. To illustrate a more real example of
		// this, someone can use `/timestamp_to_event` to jump back in history and
		// `/context` to start paginating history.
		contextRes := bob.MustDo(
			t,
			"GET",
			[]string{"_matrix", "client", "v3", "rooms", roomID, "context", bobJoinEventID},
			client.WithContentType("application/json"),
			client.WithQueries(url.Values{
				"limit": []string{"0"},
			}),
		)
		contextResResBody := client.ParseJSON(t, contextRes)
		// > `start`: A token that can be used to paginate backwards with.
		// > - https://spec.matrix.org/v1.17/client-server-api/#get_matrixclientv3roomsroomidcontexteventid
		paginationToken := client.GetJSONFieldStr(t, contextResResBody, "start")

		// Paginate backwards from the join event
		messagesRes := bob.MustDo(
			t,
			"GET",
			[]string{"_matrix", "client", "v3", "rooms", roomID, "messages"},
			client.WithContentType("application/json"),
			client.WithQueries(url.Values{
				"dir":   []string{"b"},
				"limit": []string{"100"},
				"from":  []string{paginationToken},
			}),
		)
		messagesResBody := client.ParseJSON(t, messagesRes)
		// Since `dir=b`, these will be in reverse chronological order
		actualEventIDsFromRequest := extractEventIDsFromMessagesResponse(t, messagesResBody)
		// Put them in chronological order to match the expected list
		chronologicalActualEventIds := slices.Clone(actualEventIDsFromRequest)
		slices.Reverse(chronologicalActualEventIds)

		// Assert timeline order: Bob should see all of the pre-join history, which
		// requires hs2 to backfill past the pagination token.
		assertEventsInOrder(t, chronologicalActualEventIds, eventIDs)
	})

	// TODO: Backfill test to make sure we backfill from forks when viewing history (see
	// docstring above).
	//
	// 1. Alice (hs1, engineered homeserver) creates a room with events A, B
	// 1. Bob (hs2) joins the room
	// 1. Bob leaves the room
	// 1. Alice creates a fork from A with some history (1, 2, 3) and connects it back with a new event C
	// 1. Bob joins back
	// 1. Bob paginates `/messages?dir=b&from=<token-after-b>`
	// 1. Ensure Bob sees events: B, 2, 1, A
	//
	//      1 <--- 2 <----- 3
	//     /                 \
	//   A <------- B   ▲ <--- C <-- D
	//                  |
	//                  Paginate backwards from this point
	// t.Run("Backfill from nearby backward extremities past token (fork)", func(t *testing.T) {
}
// These utilities match what we're using in the Complement repo (see
// `matrix-org/complement` -> `tests/csapi/room_messages_test.go`)

// MessageDraft describes a message that hasn't been sent yet: who should send
// it and what the text body should be.
type MessageDraft struct {
	Sender  *client.CSAPI
	Message string
}

// EventInfo ties a sent event back to the draft it originated from.
type EventInfo struct {
	MessageDraft MessageDraft
	EventID      string
}
// sendMessageDrafts sends each draft to the given room, in order, and returns
// the resulting event IDs (parallel to the drafts slice). Each send waits
// until the event shows up in the sender's sync before continuing.
func sendMessageDrafts(
	t *testing.T,
	roomID string,
	messageDrafts []MessageDraft,
) []string {
	t.Helper()

	sentEventIDs := make([]string, 0, len(messageDrafts))
	for _, draft := range messageDrafts {
		sentEventID := draft.Sender.SendEventSynced(t, roomID, b.Event{
			Type: "m.room.message",
			Content: map[string]interface{}{
				"msgtype": "m.text",
				"body":    draft.Message,
			},
		})
		sentEventIDs = append(sentEventIDs, sentEventID)
	}
	return sentEventIDs
}
// sendAndTrackMessages sends the given message drafts to the room, keeping
// track of the new events in the caller-owned `eventIDs` slice (chronological
// order) and `eventMap` (event_id -> EventInfo). Returns the list of new
// event IDs that were sent.
func sendAndTrackMessages(
	t *testing.T,
	roomID string,
	messageDrafts []MessageDraft,
	eventIDs *[]string,
	eventMap *map[string]EventInfo,
) []string {
	t.Helper()

	sentEventIDs := sendMessageDrafts(t, roomID, messageDrafts)
	for draftIndex, sentEventID := range sentEventIDs {
		// Record the event in both tracking structures as we go.
		*eventIDs = append(*eventIDs, sentEventID)
		(*eventMap)[sentEventID] = EventInfo{
			MessageDraft: messageDrafts[draftIndex],
			EventID:      sentEventID,
		}
	}
	return sentEventIDs
}
// extractEventIDsFromMessagesResponse extracts the event IDs from the `chunk`
// array of the given `/messages` response body, in the order they appear.
// Fails the test if `chunk` is missing or isn't an array.
func extractEventIDsFromMessagesResponse(
	t *testing.T,
	messagesResBody json.RawMessage,
) []string {
	t.Helper()

	wantKey := "chunk"
	chunk := gjson.GetBytes(messagesResBody, wantKey)
	switch {
	case !chunk.Exists():
		t.Fatalf("extractEventIDsFromMessagesResponse: missing key '%s'", wantKey)
	case !chunk.IsArray():
		t.Fatalf(
			"extractEventIDsFromMessagesResponse: key '%s' is not an array (was %s)",
			wantKey,
			chunk.Type,
		)
	}

	var eventIDs []string
	for _, event := range chunk.Array() {
		eventIDs = append(eventIDs, event.Get("event_id").Str)
	}
	return eventIDs
}
func filterEventIDs(t *testing.T, actualEventIDs []string, expectedEventIDs []string) []string {
t.Helper()
relevantActualEventIDs := make([]string, 0, len(expectedEventIDs))
for _, eventID := range actualEventIDs {
if slices.Contains(expectedEventIDs, eventID) {
relevantActualEventIDs = append(relevantActualEventIDs, eventID)
}
}
return relevantActualEventIDs
}
// assertEventsInOrder asserts that all `actualEventIDs` are present and in
// order according to `expectedEventIDs`. Other unrelated events may appear in
// between (they're filtered out before comparing). Fails the test with a
// rendered diff of the two lists on any mismatch.
func assertEventsInOrder(t *testing.T, actualEventIDs []string, expectedEventIDs []string) {
	t.Helper()

	relevantActualEventIDs := filterEventIDs(t, actualEventIDs, expectedEventIDs)

	// First check the counts line up so the positional comparison below is valid.
	if len(relevantActualEventIDs) != len(expectedEventIDs) {
		t.Fatalf(
			"expected %d events in timeline (got %d relevant events filtered down from %d events)\n%s",
			len(expectedEventIDs),
			len(relevantActualEventIDs),
			len(actualEventIDs),
			generateEventOrderDiffString(relevantActualEventIDs, expectedEventIDs),
		)
	}

	// Then check each position matches.
	for position, relevantEventID := range relevantActualEventIDs {
		if relevantEventID == expectedEventIDs[position] {
			continue
		}
		t.Fatalf(
			"expected event ID %s (got %s) at index %d\n%s",
			expectedEventIDs[position],
			relevantEventID,
			position,
			generateEventOrderDiffString(relevantActualEventIDs, expectedEventIDs),
		)
	}
}
func generateEventOrderDiffString(actualEventIDs []string, expectedEventIDs []string) string {
expectedLines := make([]string, len(expectedEventIDs))
for i, expectedEventID := range expectedEventIDs {
isExpectedInActual := slices.Contains(actualEventIDs, expectedEventID)
isMissingIndicatorString := " "
if !isExpectedInActual {
isMissingIndicatorString = "?"
}
expectedLines[i] = fmt.Sprintf("%2d: %s %s", i, isMissingIndicatorString, expectedEventID)
}
expectedDiffString := strings.Join(expectedLines, "\n")
actualLines := make([]string, len(actualEventIDs))
for actualEventIndex, actualEventID := range actualEventIDs {
isActualInExpected := slices.Contains(expectedEventIDs, actualEventID)
isActualInExpectedIndicatorString := " "
if isActualInExpected {
isActualInExpectedIndicatorString = "+"
}
expectedIndex := slices.Index(expectedEventIDs, actualEventID)
expectedIndexString := ""
if actualEventIndex != expectedIndex {
expectedDirectionString := "⬆️"
if expectedIndex > actualEventIndex {
expectedDirectionString = "⬇️"
}
expectedIndexString = fmt.Sprintf(
" (expected index %d %s)",
expectedIndex,
expectedDirectionString,
)
}
actualLines[actualEventIndex] = fmt.Sprintf("%2d: %s %s%s",
actualEventIndex, isActualInExpectedIndicatorString, actualEventID, expectedIndexString,
)
}
actualDiffString := strings.Join(actualLines, "\n")
return fmt.Sprintf(
"Actual events ('+' = found expected items):\n%s\nExpected events ('?' = missing expected items):\n%s",
actualDiffString,
expectedDiffString,
)
}
// getStateID looks up the event ID of the current state event in `roomID`
// matching the given (stateType, stateKey) pair, using the full
// `GET /_matrix/client/v3/rooms/{roomId}/state` response. Fails the test if
// the response is not an array or no matching state event is found.
func getStateID(
	t *testing.T,
	c *client.CSAPI,
	roomID string,
	stateType string,
	stateKey string,
) string {
	t.Helper()
	res := c.MustDo(t, "GET", []string{"_matrix", "client", "v3", "rooms", roomID, "state"})
	body := client.ParseJSON(t, res)
	parsed := gjson.ParseBytes(body)
	if !parsed.IsArray() {
		t.Fatalf("expected array of state events but found %s", parsed.Type)
	}
	stateEvents := parsed.Array()
	for _, stateEvent := range stateEvents {
		if stateEvent.Get("type").Str != stateType || stateEvent.Get("state_key").Str != stateKey {
			continue
		}
		return stateEvent.Get("event_id").Str
	}
	t.Fatalf("Unable to find state event for (%s, %s). Room state: %s", stateType, stateKey, stateEvents)
	return ""
}
+6
View File
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.152.0) stable; urgency=medium
* New Synapse release 1.152.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Apr 2026 11:45:01 +0100
matrix-synapse-py3 (1.152.0~rc1) stable; urgency=medium
* New Synapse release 1.152.0rc1.
@@ -123,8 +123,6 @@ experimental_features:
msc3874_enabled: true
# no UIA for x-signing upload for the first time
msc3967_enabled: true
# Expose a room summary for public rooms
msc3266_enabled: true
# Send to-device messages to application services
msc2409_to_device_messages_enabled: true
# Allow application services to masquerade devices
+6 -1
View File
@@ -308,6 +308,9 @@ The following fields are possible in the JSON response body:
If the room does not define a type, the value will be `null`.
* `forgotten` - Whether all local users have
[forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room.
* `tombstoned` - Whether the room has been tombstoned (permanently closed).
* `replacement_room` - The room ID of the new room that users should join instead, if this room was tombstoned. Will be
`null` if the room has not been tombstoned, or if it was tombstoned without designating a successor room.
The API is:
@@ -337,7 +340,9 @@ A response body like the following is returned:
"history_visibility": "shared",
"state_events": 93534,
"room_type": "m.space",
"forgotten": false
"forgotten": false,
"tombstoned": false,
"replacement_room": null
}
```
@@ -194,7 +194,11 @@ user_agent_suffix: ' (I''m a teapot; Linux x86_64)'
---
### `use_frozen_dicts`
*(boolean)* Determines whether we should freeze the internal dict object in `FrozenEvent`. Freezing prevents bugs where we accidentally share e.g. signature dicts. However, freezing a dict is expensive. Defaults to `false`.
*(boolean)* Determines whether we should freeze the internal dict object in `FrozenEvent`. Freezing prevents bugs where we accidentally share e.g. signature dicts. However, freezing a dict is expensive.
> ⚠️ **Warning** This option is known to introduce a new class of [comparison bugs](https://github.com/element-hq/synapse/issues/18117) in Synapse.
Defaults to `false`.
Example configuration:
```yaml
Generated
+103 -103
View File
@@ -14,14 +14,14 @@ files = [
[[package]]
name = "attrs"
version = "25.4.0"
version = "26.1.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
{file = "attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373"},
{file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"},
{file = "attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309"},
{file = "attrs-26.1.0.tar.gz", hash = "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32"},
]
[[package]]
@@ -582,21 +582,21 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
version = "3.1.46"
version = "3.1.47"
description = "GitPython is a Python library used to interact with Git repositories"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "gitpython-3.1.46-py3-none-any.whl", hash = "sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058"},
{file = "gitpython-3.1.46.tar.gz", hash = "sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f"},
{file = "gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905"},
{file = "gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd"},
]
[package.dependencies]
gitdb = ">=4.0.1,<5"
[package.extras]
doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"]
doc = ["sphinx (>=7.4.7,<8)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"]
test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock ; python_version < \"3.8\"", "mypy (==1.18.2) ; python_version >= \"3.9\"", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions ; python_version < \"3.11\""]
[[package]]
@@ -1901,103 +1901,103 @@ files = [
[[package]]
name = "pillow"
version = "12.1.1"
version = "12.2.0"
description = "Python Imaging Library (fork)"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0"},
{file = "pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713"},
{file = "pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b"},
{file = "pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b"},
{file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4"},
{file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4"},
{file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e"},
{file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff"},
{file = "pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40"},
{file = "pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23"},
{file = "pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9"},
{file = "pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32"},
{file = "pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38"},
{file = "pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5"},
{file = "pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090"},
{file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af"},
{file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b"},
{file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5"},
{file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d"},
{file = "pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c"},
{file = "pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563"},
{file = "pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80"},
{file = "pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052"},
{file = "pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984"},
{file = "pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79"},
{file = "pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293"},
{file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397"},
{file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0"},
{file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3"},
{file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35"},
{file = "pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a"},
{file = "pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6"},
{file = "pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523"},
{file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e"},
{file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9"},
{file = "pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6"},
{file = "pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60"},
{file = "pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2"},
{file = "pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850"},
{file = "pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289"},
{file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e"},
{file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717"},
{file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a"},
{file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029"},
{file = "pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b"},
{file = "pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1"},
{file = "pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a"},
{file = "pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da"},
{file = "pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc"},
{file = "pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c"},
{file = "pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8"},
{file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20"},
{file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13"},
{file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf"},
{file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524"},
{file = "pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986"},
{file = "pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c"},
{file = "pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3"},
{file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af"},
{file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f"},
{file = "pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642"},
{file = "pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd"},
{file = "pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202"},
{file = "pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f"},
{file = "pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f"},
{file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f"},
{file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e"},
{file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0"},
{file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb"},
{file = "pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f"},
{file = "pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15"},
{file = "pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f"},
{file = "pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8"},
{file = "pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9"},
{file = "pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60"},
{file = "pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7"},
{file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f"},
{file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586"},
{file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce"},
{file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8"},
{file = "pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36"},
{file = "pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b"},
{file = "pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735"},
{file = "pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e"},
{file = "pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4"},
{file = "pillow-12.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a4e8f36e677d3336f35089648c8955c51c6d386a13cf6ee9c189c5f5bd713a9f"},
{file = "pillow-12.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e589959f10d9824d39b350472b92f0ce3b443c0a3442ebf41c40cb8361c5b97"},
{file = "pillow-12.2.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a52edc8bfff4429aaabdf4d9ee0daadbbf8562364f940937b941f87a4290f5ff"},
{file = "pillow-12.2.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:975385f4776fafde056abb318f612ef6285b10a1f12b8570f3647ad0d74b48ec"},
{file = "pillow-12.2.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd9c0c7a0c681a347b3194c500cb1e6ca9cab053ea4d82a5cf45b6b754560136"},
{file = "pillow-12.2.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:88d387ff40b3ff7c274947ed3125dedf5262ec6919d83946753b5f3d7c67ea4c"},
{file = "pillow-12.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:51c4167c34b0d8ba05b547a3bb23578d0ba17b80a5593f93bd8ecb123dd336a3"},
{file = "pillow-12.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:34c0d99ecccea270c04882cb3b86e7b57296079c9a4aff88cb3b33563d95afaa"},
{file = "pillow-12.2.0-cp310-cp310-win32.whl", hash = "sha256:b85f66ae9eb53e860a873b858b789217ba505e5e405a24b85c0464822fe88032"},
{file = "pillow-12.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:673aa32138f3e7531ccdbca7b3901dba9b70940a19ccecc6a37c77d5fdeb05b5"},
{file = "pillow-12.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:3e080565d8d7c671db5802eedfb438e5565ffa40115216eabb8cd52d0ecce024"},
{file = "pillow-12.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:8be29e59487a79f173507c30ddf57e733a357f67881430449bb32614075a40ab"},
{file = "pillow-12.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:71cde9a1e1551df7d34a25462fc60325e8a11a82cc2e2f54578e5e9a1e153d65"},
{file = "pillow-12.2.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f490f9368b6fc026f021db16d7ec2fbf7d89e2edb42e8ec09d2c60505f5729c7"},
{file = "pillow-12.2.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8bd7903a5f2a4545f6fd5935c90058b89d30045568985a71c79f5fd6edf9b91e"},
{file = "pillow-12.2.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3997232e10d2920a68d25191392e3a4487d8183039e1c74c2297f00ed1c50705"},
{file = "pillow-12.2.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e74473c875d78b8e9d5da2a70f7099549f9eb37ded4e2f6a463e60125bccd176"},
{file = "pillow-12.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:56a3f9c60a13133a98ecff6197af34d7824de9b7b38c3654861a725c970c197b"},
{file = "pillow-12.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:90e6f81de50ad6b534cab6e5aef77ff6e37722b2f5d908686f4a5c9eba17a909"},
{file = "pillow-12.2.0-cp311-cp311-win32.whl", hash = "sha256:8c984051042858021a54926eb597d6ee3012393ce9c181814115df4c60b9a808"},
{file = "pillow-12.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e6b2a0c538fc200b38ff9eb6628228b77908c319a005815f2dde585a0664b60"},
{file = "pillow-12.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:9a8a34cc89c67a65ea7437ce257cea81a9dad65b29805f3ecee8c8fe8ff25ffe"},
{file = "pillow-12.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2d192a155bbcec180f8564f693e6fd9bccff5a7af9b32e2e4bf8c9c69dbad6b5"},
{file = "pillow-12.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3f40b3c5a968281fd507d519e444c35f0ff171237f4fdde090dd60699458421"},
{file = "pillow-12.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:03e7e372d5240cc23e9f07deca4d775c0817bffc641b01e9c3af208dbd300987"},
{file = "pillow-12.2.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b86024e52a1b269467a802258c25521e6d742349d760728092e1bc2d135b4d76"},
{file = "pillow-12.2.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7371b48c4fa448d20d2714c9a1f775a81155050d383333e0a6c15b1123dda005"},
{file = "pillow-12.2.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62f5409336adb0663b7caa0da5c7d9e7bdbaae9ce761d34669420c2a801b2780"},
{file = "pillow-12.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:01afa7cf67f74f09523699b4e88c73fb55c13346d212a59a2db1f86b0a63e8c5"},
{file = "pillow-12.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc3d34d4a8fbec3e88a79b92e5465e0f9b842b628675850d860b8bd300b159f5"},
{file = "pillow-12.2.0-cp312-cp312-win32.whl", hash = "sha256:58f62cc0f00fd29e64b29f4fd923ffdb3859c9f9e6105bfc37ba1d08994e8940"},
{file = "pillow-12.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:7f84204dee22a783350679a0333981df803dac21a0190d706a50475e361c93f5"},
{file = "pillow-12.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:af73337013e0b3b46f175e79492d96845b16126ddf79c438d7ea7ff27783a414"},
{file = "pillow-12.2.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:8297651f5b5679c19968abefd6bb84d95fe30ef712eb1b2d9b2d31ca61267f4c"},
{file = "pillow-12.2.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:50d8520da2a6ce0af445fa6d648c4273c3eeefbc32d7ce049f22e8b5c3daecc2"},
{file = "pillow-12.2.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:766cef22385fa1091258ad7e6216792b156dc16d8d3fa607e7545b2b72061f1c"},
{file = "pillow-12.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5d2fd0fa6b5d9d1de415060363433f28da8b1526c1c129020435e186794b3795"},
{file = "pillow-12.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56b25336f502b6ed02e889f4ece894a72612fe885889a6e8c4c80239ff6e5f5f"},
{file = "pillow-12.2.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f1c943e96e85df3d3478f7b691f229887e143f81fedab9b20205349ab04d73ed"},
{file = "pillow-12.2.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:03f6fab9219220f041c74aeaa2939ff0062bd5c364ba9ce037197f4c6d498cd9"},
{file = "pillow-12.2.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdfebd752ec52bf5bb4e35d9c64b40826bc5b40a13df7c3cda20a2c03a0f5ed"},
{file = "pillow-12.2.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eedf4b74eda2b5a4b2b2fb4c006d6295df3bf29e459e198c90ea48e130dc75c3"},
{file = "pillow-12.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:00a2865911330191c0b818c59103b58a5e697cae67042366970a6b6f1b20b7f9"},
{file = "pillow-12.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e1757442ed87f4912397c6d35a0db6a7b52592156014706f17658ff58bbf795"},
{file = "pillow-12.2.0-cp313-cp313-win32.whl", hash = "sha256:144748b3af2d1b358d41286056d0003f47cb339b8c43a9ea42f5fea4d8c66b6e"},
{file = "pillow-12.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:390ede346628ccc626e5730107cde16c42d3836b89662a115a921f28440e6a3b"},
{file = "pillow-12.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:8023abc91fba39036dbce14a7d6535632f99c0b857807cbbbf21ecc9f4717f06"},
{file = "pillow-12.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:042db20a421b9bafecc4b84a8b6e444686bd9d836c7fd24542db3e7df7baad9b"},
{file = "pillow-12.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd025009355c926a84a612fecf58bb315a3f6814b17ead51a8e48d3823d9087f"},
{file = "pillow-12.2.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88ddbc66737e277852913bd1e07c150cc7bb124539f94c4e2df5344494e0a612"},
{file = "pillow-12.2.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d362d1878f00c142b7e1a16e6e5e780f02be8195123f164edf7eddd911eefe7c"},
{file = "pillow-12.2.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2c727a6d53cb0018aadd8018c2b938376af27914a68a492f59dfcaca650d5eea"},
{file = "pillow-12.2.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:efd8c21c98c5cc60653bcb311bef2ce0401642b7ce9d09e03a7da87c878289d4"},
{file = "pillow-12.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9f08483a632889536b8139663db60f6724bfcb443c96f1b18855860d7d5c0fd4"},
{file = "pillow-12.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dac8d77255a37e81a2efcbd1fc05f1c15ee82200e6c240d7e127e25e365c39ea"},
{file = "pillow-12.2.0-cp313-cp313t-win32.whl", hash = "sha256:ee3120ae9dff32f121610bb08e4313be87e03efeadfc6c0d18f89127e24d0c24"},
{file = "pillow-12.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:325ca0528c6788d2a6c3d40e3568639398137346c3d6e66bb61db96b96511c98"},
{file = "pillow-12.2.0-cp313-cp313t-win_arm64.whl", hash = "sha256:2e5a76d03a6c6dcef67edabda7a52494afa4035021a79c8558e14af25313d453"},
{file = "pillow-12.2.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:3adc9215e8be0448ed6e814966ecf3d9952f0ea40eb14e89a102b87f450660d8"},
{file = "pillow-12.2.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:6a9adfc6d24b10f89588096364cc726174118c62130c817c2837c60cf08a392b"},
{file = "pillow-12.2.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:6a6e67ea2e6feda684ed370f9a1c52e7a243631c025ba42149a2cc5934dec295"},
{file = "pillow-12.2.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2bb4a8d594eacdfc59d9e5ad972aa8afdd48d584ffd5f13a937a664c3e7db0ed"},
{file = "pillow-12.2.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:80b2da48193b2f33ed0c32c38140f9d3186583ce7d516526d462645fd98660ae"},
{file = "pillow-12.2.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22db17c68434de69d8ecfc2fe821569195c0c373b25cccb9cbdacf2c6e53c601"},
{file = "pillow-12.2.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7b14cc0106cd9aecda615dd6903840a058b4700fcb817687d0ee4fc8b6e389be"},
{file = "pillow-12.2.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cbeb542b2ebc6fcdacabf8aca8c1a97c9b3ad3927d46b8723f9d4f033288a0f"},
{file = "pillow-12.2.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4bfd07bc812fbd20395212969e41931001fd59eb55a60658b0e5710872e95286"},
{file = "pillow-12.2.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9aba9a17b623ef750a4d11b742cbafffeb48a869821252b30ee21b5e91392c50"},
{file = "pillow-12.2.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:deede7c263feb25dba4e82ea23058a235dcc2fe1f6021025dc71f2b618e26104"},
{file = "pillow-12.2.0-cp314-cp314-win32.whl", hash = "sha256:632ff19b2778e43162304d50da0181ce24ac5bb8180122cbe1bf4673428328c7"},
{file = "pillow-12.2.0-cp314-cp314-win_amd64.whl", hash = "sha256:4e6c62e9d237e9b65fac06857d511e90d8461a32adcc1b9065ea0c0fa3a28150"},
{file = "pillow-12.2.0-cp314-cp314-win_arm64.whl", hash = "sha256:b1c1fbd8a5a1af3412a0810d060a78b5136ec0836c8a4ef9aa11807f2a22f4e1"},
{file = "pillow-12.2.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:57850958fe9c751670e49b2cecf6294acc99e562531f4bd317fa5ddee2068463"},
{file = "pillow-12.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d5d38f1411c0ed9f97bcb49b7bd59b6b7c314e0e27420e34d99d844b9ce3b6f3"},
{file = "pillow-12.2.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5c0a9f29ca8e79f09de89293f82fc9b0270bb4af1d58bc98f540cc4aedf03166"},
{file = "pillow-12.2.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1610dd6c61621ae1cf811bef44d77e149ce3f7b95afe66a4512f8c59f25d9ebe"},
{file = "pillow-12.2.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a34329707af4f73cf1782a36cd2289c0368880654a2c11f027bcee9052d35dd"},
{file = "pillow-12.2.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e9c4f5b3c546fa3458a29ab22646c1c6c787ea8f5ef51300e5a60300736905e"},
{file = "pillow-12.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fb043ee2f06b41473269765c2feae53fc2e2fbf96e5e22ca94fb5ad677856f06"},
{file = "pillow-12.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f278f034eb75b4e8a13a54a876cc4a5ab39173d2cdd93a638e1b467fc545ac43"},
{file = "pillow-12.2.0-cp314-cp314t-win32.whl", hash = "sha256:6bb77b2dcb06b20f9f4b4a8454caa581cd4dd0643a08bacf821216a16d9c8354"},
{file = "pillow-12.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:6562ace0d3fb5f20ed7290f1f929cae41b25ae29528f2af1722966a0a02e2aa1"},
{file = "pillow-12.2.0-cp314-cp314t-win_arm64.whl", hash = "sha256:aa88ccfe4e32d362816319ed727a004423aab09c5cea43c01a4b435643fa34eb"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538bd5e05efec03ae613fd89c4ce0368ecd2ba239cc25b9f9be7ed426b0af1f"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:394167b21da716608eac917c60aa9b969421b5dcbbe02ae7f013e7b85811c69d"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5d04bfa02cc2d23b497d1e90a0f927070043f6cbf303e738300532379a4b4e0f"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0c838a5125cee37e68edec915651521191cef1e6aa336b855f495766e77a366e"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a6c9fa44005fa37a91ebfc95d081e8079757d2e904b27103f4f5fa6f0bf78c0"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:25373b66e0dd5905ed63fa3cae13c82fbddf3079f2c8bf15c6fb6a35586324c1"},
{file = "pillow-12.2.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bfa9c230d2fe991bed5318a5f119bd6780cda2915cca595393649fc118ab895e"},
{file = "pillow-12.2.0.tar.gz", hash = "sha256:a830b1a40919539d07806aa58e1b114df53ddd43213d9c8b75847eee6c0182b5"},
]
[package.extras]
@@ -2516,14 +2516,14 @@ six = ">=1.5"
[[package]]
name = "python-multipart"
version = "0.0.22"
version = "0.0.26"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155"},
{file = "python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58"},
{file = "python_multipart-0.0.26-py3-none-any.whl", hash = "sha256:c0b169f8c4484c13b0dcf2ef0ec3a4adb255c4b7d18d8e420477d2b1dd03f185"},
{file = "python_multipart-0.0.26.tar.gz", hash = "sha256:08fadc45918cd615e26846437f50c5d6d23304da32c341f289a617127b081f17"},
]
[[package]]
@@ -3755,4 +3755,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
content-hash = "8d994f1fc65664b2a04e1de78df4d1f06f3d99b39f95db16763790f2ee0aff11"
content-hash = "d97bee07fec0f4048d964aa7127a50813920bce77b00e5191aa1815f83922c85"
+5 -5
View File
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
version = "1.152.0rc1"
version = "1.152.0"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
@@ -66,7 +66,7 @@ dependencies = [
"prometheus-client>=0.6.0",
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
"attrs>=19.2.0,!=21.1.0",
"attrs>=26.1.0,!=21.1.0",
"netaddr>=0.7.18",
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
@@ -137,7 +137,7 @@ saml2 = [
"defusedxml>=0.7.1", # via pysaml2
"pytz>=2018.3", # via pysaml2
]
oidc = ["authlib>=1.6.11"]
oidc = ["authlib>=0.15.1"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
opentracing = [
@@ -179,7 +179,7 @@ all = [
# saml2
"pysaml2>=4.5.0",
# oidc and jwt
"authlib>=1.6.11",
"authlib>=0.15.1",
# url-preview
"lxml>=4.6.3",
# sentry
@@ -292,7 +292,7 @@ dev = [
# The following are used by the release script
"click>=8.1.3",
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
"GitPython>=3.1.20",
"GitPython>=3.1.47",
"markdown-it-py>=3.0.0",
"pygithub>=1.59",
# The following are executed as commands by the release script.
+2 -1
View File
@@ -44,7 +44,7 @@ pythonize = "0.27.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
serde_json = { version = "1.0.85", features = ["raw_value"] }
ulid = "1.1.2"
icu_segmenter = "2.0.0"
reqwest = { version = "0.12.15", default-features = false, features = [
@@ -56,6 +56,7 @@ http-body-util = "0.1.3"
futures = "0.3.31"
tokio = { version = "1.44.2", features = ["rt", "rt-multi-thread"] }
once_cell = "1.18.0"
itertools = "0.14.0"
[features]
extension-module = ["pyo3/extension-module"]
+841
View File
@@ -0,0 +1,841 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2026 Element Creations Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*
* Originally licensed under the Apache License, Version 2.0:
* <http://www.apache.org/licenses/LICENSE-2.0>.
*
* [This file includes modifications made by Element Creations Ltd]
*/
//! Serialize a Rust data structure into canonical JSON data.
//!
//! See the [Canonical
//! JSON](https://matrix.org/docs/spec/appendices#canonical-json) docs for more
//! information.
use std::{
collections::BTreeMap,
convert::TryFrom,
io::{self, Write},
};
use serde::ser::SerializeMap;
use serde::{
ser::{Error as _, SerializeStruct},
Serialize,
};
use serde_json::{
ser::{Formatter, Serializer},
value::RawValue,
Value,
};
/// The minimum integer that can be used in canonical JSON: `-(2^53) + 1`.
pub const MIN_VALID_INTEGER: i64 = -(2i64.pow(53)) + 1;
/// The maximum integer that can be used in canonical JSON: `2^53 - 1`.
pub const MAX_VALID_INTEGER: i64 = (2i64.pow(53)) - 1;
/// Options to control how strict JSON canonicalization is.
#[derive(Clone, Debug)]
pub struct CanonicalizationOptions {
    /// Configure the serializer to strictly enforce the canonical JSON allowable number range.
    /// Allows JSON for room versions v5 or less when `false`.
    enforce_int_range: bool,
}
impl CanonicalizationOptions {
    /// Permissive settings: integers outside the canonical JSON range are
    /// accepted (for room versions v5 or less).
    pub fn relaxed() -> Self {
        Self {
            enforce_int_range: false,
        }
    }
    /// Strict settings: the canonical JSON integer range is enforced.
    pub fn strict() -> Self {
        Self {
            enforce_int_range: true,
        }
    }
}
/// Serialize the given data structure as a canonical JSON byte vector.
///
/// See the [Canonical
/// JSON](https://matrix.org/docs/spec/appendices#canonical-json) docs for more
/// information.
///
/// Note: serializing [`RawValue`] is not supported, as it may contain JSON that
/// is not canonical.
///
/// # Errors
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` contains numbers
/// that are not integers in the range `[-2**53 + 1, 2**53 - 1]`.
pub fn to_vec_canonical<T>(
    value: &T,
    options: CanonicalizationOptions,
) -> Result<Vec<u8>, serde_json::Error>
where
    T: Serialize + ?Sized,
{
    let mut out = Vec::new();
    value.serialize(&mut CanonicalSerializer::new(&mut out, options))?;
    Ok(out)
}
/// Serialize the given data structure as a canonical JSON string.
///
/// See the [Canonical
/// JSON](https://matrix.org/docs/spec/appendices#canonical-json) docs for more
/// information.
///
/// Note: serializing [`RawValue`] is not supported, as it may contain JSON that
/// is not canonical.
///
/// # Errors
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` contains numbers
/// that are not integers in the range `[-2**53 + 1, 2**53 - 1]`.
pub fn to_string_canonical<T>(
    value: &T,
    options: CanonicalizationOptions,
) -> Result<String, serde_json::Error>
where
    T: Serialize + ?Sized,
{
    let bytes = to_vec_canonical(value, options)?;
    // The canonical serializer only ever emits valid UTF-8, so this
    // conversion cannot fail in practice.
    Ok(String::from_utf8(bytes).expect("valid utf8"))
}
/// A helper function that asserts that an integer is in the valid range.
fn assert_integer_in_range<I>(v: I) -> Result<(), serde_json::Error>
where
i64: TryFrom<I>,
{
let res = i64::try_from(v);
match res {
Ok(MIN_VALID_INTEGER..=MAX_VALID_INTEGER) => Ok(()),
Ok(_) | Err(_) => Err(serde_json::Error::custom("integer out of range")),
}
}
/// A JSON formatter that ensures all strings are encoded as per the [Canonical
/// JSON](https://matrix.org/docs/spec/appendices#canonical-json) spec.
pub struct CanonicalFormatter;
impl Formatter for CanonicalFormatter {
    /// Writes a run of characters that need no escaping straight through.
    fn write_string_fragment<W>(&mut self, writer: &mut W, fragment: &str) -> io::Result<()>
    where
        W: ?Sized + io::Write,
    {
        // `fragment` only contains characters that are not escaped, and don't
        // need to be escaped, so they can be written directly to the writer.
        writer.write_all(fragment.as_bytes())
    }
    /// Writes the escape sequence for a single character: the short two-byte
    /// escapes (`\"`, `\\`, `\b`, `\f`, `\n`, `\r`, `\t`) where they exist,
    /// and lowercase `\u00xx` for the remaining ASCII control characters.
    fn write_char_escape<W>(
        &mut self,
        writer: &mut W,
        char_escape: serde_json::ser::CharEscape,
    ) -> io::Result<()>
    where
        W: ?Sized + io::Write,
    {
        use serde_json::ser::CharEscape::*;
        let s = match char_escape {
            Quote => b"\\\"" as &[u8],
            ReverseSolidus => b"\\\\",
            Solidus => b"/", // Note: this doesn't need to be escaped (and appears unused in serde_json).
            Backspace => b"\\b",
            FormFeed => b"\\f",
            LineFeed => b"\\n",
            CarriageReturn => b"\\r",
            Tab => b"\\t",
            AsciiControl(byte) => {
                // Lowercase hex, per the canonical JSON test vectors.
                static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef";
                let bytes = &[
                    b'\\',
                    b'u',
                    b'0',
                    b'0',
                    HEX_DIGITS[(byte >> 4) as usize],
                    HEX_DIGITS[(byte & 0xF) as usize],
                ];
                return writer.write_all(bytes);
            }
        };
        writer.write_all(s)
    }
}
/// A JSON serializer that outputs [Canonical
/// JSON](https://matrix.org/docs/spec/appendices#canonical-json).
pub struct CanonicalSerializer<W> {
    // The underlying `serde_json` serializer, configured with our
    // string-escaping formatter. Most calls are proxied straight to it.
    inner: Serializer<W, CanonicalFormatter>,
    // Controls how strictly the integer range is enforced.
    options: CanonicalizationOptions,
}
impl<W> CanonicalSerializer<W>
where
    W: Write,
{
    /// Create a new serializer that writes the canonical JSON bytes to the
    /// given writer.
    pub fn new(writer: W, options: CanonicalizationOptions) -> Self {
        Self {
            inner: Serializer::with_formatter(writer, CanonicalFormatter),
            options,
        }
    }
}
// We implement the serializer by proxying all calls to the standard
// `serde_json` serializer, except where we a) buffer up maps and structs so that we can
// sort them, and b) ensure that all numbers are integers in the valid range.
impl<'a, W> serde::Serializer for &'a mut CanonicalSerializer<W>
where
W: Write,
{
type Ok = <&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::Ok;
type Error = <&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::Error;
type SerializeSeq =
<&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::SerializeSeq;
type SerializeTuple =
<&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::SerializeTuple;
type SerializeTupleStruct =
<&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::SerializeTupleStruct;
type SerializeTupleVariant =
<&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::SerializeTupleVariant;
type SerializeMap = CanonicalSerializeMap<'a, W>;
type SerializeStruct = CanonicalSerializeMap<'a, W>;
type SerializeStructVariant =
<&'a mut Serializer<W, CanonicalFormatter> as serde::Serializer>::SerializeStructVariant;
fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_bool(v)
}
fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
assert_integer_in_range(v)?;
self.inner.serialize_i8(v)
}
fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
assert_integer_in_range(v)?;
self.inner.serialize_i16(v)
}
fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
assert_integer_in_range(v)?;
self.inner.serialize_i32(v)
}
fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
if self.options.enforce_int_range {
assert_integer_in_range(v)?;
}
self.inner.serialize_i64(v)
}
fn serialize_i128(self, v: i128) -> Result<Self::Ok, Self::Error> {
if self.options.enforce_int_range {
assert_integer_in_range(v)?;
}
self.inner.serialize_i128(v)
}
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
assert_integer_in_range(v)?;
self.inner.serialize_u8(v)
}
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_u16(v)
}
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
assert_integer_in_range(v)?;
self.inner.serialize_u32(v)
}
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
if self.options.enforce_int_range {
assert_integer_in_range(v)?;
}
self.inner.serialize_u64(v)
}
fn serialize_u128(self, v: u128) -> Result<Self::Ok, Self::Error> {
if self.options.enforce_int_range {
assert_integer_in_range(v)?;
}
self.inner.serialize_u128(v)
}
fn serialize_f32(self, _: f32) -> Result<Self::Ok, Self::Error> {
Err(serde_json::Error::custom(
"non-integer numbers are not allowed",
))
}
fn serialize_f64(self, _: f64) -> Result<Self::Ok, Self::Error> {
Err(serde_json::Error::custom(
"non-integer numbers are not allowed",
))
}
fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_char(v)
}
fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_str(v)
}
fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_bytes(v)
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_none()
}
fn serialize_some<T>(self, value: &T) -> Result<Self::Ok, Self::Error>
where
T: serde::Serialize + ?Sized,
{
self.inner.serialize_some(value)
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_unit()
}
fn serialize_unit_struct(self, name: &'static str) -> Result<Self::Ok, Self::Error> {
self.inner.serialize_unit_struct(name)
}
fn serialize_unit_variant(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.inner
.serialize_unit_variant(name, variant_index, variant)
}
fn serialize_newtype_struct<T>(
self,
name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: serde::Serialize + ?Sized,
{
self.inner.serialize_newtype_struct(name, value)
}
fn serialize_newtype_variant<T>(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: serde::Serialize + ?Sized,
{
self.inner
.serialize_newtype_variant(name, variant_index, variant, value)
}
fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
self.inner.serialize_seq(len)
}
fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Self::Error> {
self.inner.serialize_tuple(len)
}
fn serialize_tuple_struct(
self,
name: &'static str,
len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
self.inner.serialize_tuple_struct(name, len)
}
fn serialize_tuple_variant(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
self.inner
.serialize_tuple_variant(name, variant_index, variant, len)
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Ok(CanonicalSerializeMap::new(
&mut self.inner,
self.options.clone(),
))
}
fn serialize_struct(
self,
name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
// We want to disallow `RawValue` as we don't know if its contents is
// canonical JSON.
//
// Note: the `name` here comes from `serde_json::raw::TOKEN`, which
// unfortunately isn't exported by the crate.
if name == "$serde_json::private::RawValue" {
return Err(Self::Error::custom("`RawValue` is not supported"));
}
Ok(CanonicalSerializeMap::new(
&mut self.inner,
self.options.clone(),
))
}
fn serialize_struct_variant(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
self.inner
.serialize_struct_variant(name, variant_index, variant, len)
}
fn collect_str<T>(self, value: &T) -> Result<Self::Ok, Self::Error>
where
T: std::fmt::Display + ?Sized,
{
self.inner.collect_str(value)
}
}
/// A helper type for [`CanonicalSerializer`] that serializes JSON maps in
/// lexicographic order.
#[doc(hidden)]
pub struct CanonicalSerializeMap<'a, W> {
    // We buffer up the key and serialized value for each field we see.
    // The BTreeMap will then serialize in lexicographic order.
    map: BTreeMap<String, Box<RawValue>>,
    // A key which we're still waiting for a value for
    last_key: Option<String>,
    // The serializer to use to write the sorted map to.
    ser: &'a mut Serializer<W, CanonicalFormatter>,
    // Strictness options, propagated to the recursive serialization of each
    // value.
    options: CanonicalizationOptions,
}
impl<'a, W> CanonicalSerializeMap<'a, W> {
    /// Create an empty buffering map serializer that will eventually write to
    /// `ser`.
    fn new(
        ser: &'a mut Serializer<W, CanonicalFormatter>,
        options: CanonicalizationOptions,
    ) -> Self {
        Self {
            map: BTreeMap::new(),
            last_key: None,
            ser,
            options,
        }
    }
}
impl<'a, W> SerializeMap for CanonicalSerializeMap<'a, W>
where
    W: Write,
{
    type Ok = ();
    type Error = serde_json::Error;
    /// Stash the key until the matching `serialize_value` call arrives.
    /// Non-string keys are rejected, as canonical JSON only allows string
    /// keys.
    fn serialize_key<T>(&mut self, key: &T) -> Result<(), Self::Error>
    where
        T: serde::Serialize + ?Sized,
    {
        if self.last_key.is_some() {
            // This can only happen if `serialize_key` is called multiple times
            // in a row without a `serialize_value` call in between. This
            // violates the contract of `SerializeMap`.
            return Err(Self::Error::custom(
                "serialize_key called multiple times in a row without serialize_value",
            ));
        }
        // Parse the `key` into a string.
        let key_string = if let Value::String(str) = serde_json::to_value(key)? {
            str
        } else {
            return Err(Self::Error::custom("key must be a string"));
        };
        self.last_key = Some(key_string);
        Ok(())
    }
    /// Canonically serialize `value` and buffer it under the pending key.
    /// Note: inserting into the `BTreeMap` means duplicate keys are
    /// deduplicated with last-write-wins semantics (covered by the
    /// `map_with_duplicate_keys` test).
    fn serialize_value<T>(&mut self, value: &T) -> Result<(), Self::Error>
    where
        T: serde::Serialize + ?Sized,
    {
        let key_string = if let Some(key_string) = self.last_key.take() {
            key_string
        } else {
            // `serde` should ensure that for every `serialize_key` there is a
            // `serialize_field` call, so `last_key` should never be None here.
            unreachable!()
        };
        // We serialize the value canonically, then store it as a `RawValue` in
        // the buffer map.
        let value_string = to_string_canonical(value, self.options.clone())?;
        self.map
            .insert(key_string, RawValue::from_string(value_string)?);
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        // No more entries in the map being serialized, so we can now serialize
        // our buffered map (which will be serialized in the correct order as
        // it's a BTreeMap).
        self.map.serialize(self.ser)?;
        Ok(())
    }
}
impl<'a, W> SerializeStruct for CanonicalSerializeMap<'a, W>
where
    W: Write,
{
    type Ok = ();
    type Error = serde_json::Error;
    /// Canonically serialize `value` and buffer it under `key`; the buffered
    /// entries are written out in sorted order by [`Self::end`].
    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>
    where
        T: Serialize + ?Sized,
    {
        let rendered = to_string_canonical(value, self.options.clone())?;
        self.map.insert(key.to_owned(), RawValue::from_string(rendered)?);
        Ok(())
    }
    /// Write the buffered fields out; `BTreeMap` iteration yields them in
    /// lexicographic key order, as canonical JSON requires.
    fn end(self) -> Result<Self::Ok, Self::Error> {
        self.map.serialize(self.ser)
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use itertools::Itertools;
    use serde::Serializer;
    use serde_json::json;
    use super::*;
    // An empty object serializes to `{}`.
    #[test]
    fn empty() {
        let test = json!({});
        let json_string = to_string_canonical(&test, CanonicalizationOptions::strict()).unwrap();
        assert_eq!(json_string, r#"{}"#);
    }
    // Struct fields are emitted in lexicographic order, not declaration order.
    #[test]
    fn order_struct_fields() {
        #[derive(Serialize)]
        struct Test {
            b: u8,
            a: u8,
        }
        let test = Test { b: 1, a: 2 };
        let json_string = to_string_canonical(&test, CanonicalizationOptions::strict()).unwrap();
        assert_eq!(json_string, r#"{"a":2,"b":1}"#);
    }
    // Non-ASCII stays unescaped; newlines use the short escape; other control
    // characters use `\u00xx`.
    #[test]
    fn strings() {
        let test = json!({
            "a": "\u{1F37B}",
            "b": "\n",
            "c": "\x01",
        });
        let json_string = to_string_canonical(&test, CanonicalizationOptions::strict()).unwrap();
        assert_eq!(json_string, r#"{"a":"🍻","b":"\n","c":"\u0001"}"#);
    }
    // Exhaustively checks the escaping of every Unicode scalar value.
    #[test]
    fn escapes() {
        let mut buffer;
        let mut char_buffer = [0u8; 4];
        // Ensure that we encode every UTF-8 character correctly
        for c in '\0'..='\u{10FFFF}' {
            // Serialize the character and strip out the quotes to make comparison easier.
            let json_string = to_string_canonical(&c, CanonicalizationOptions::strict()).unwrap();
            let unquoted_json_string = &json_string[1..json_string.len() - 1];
            let expected = match c {
                // Some control characters have specific escape codes.
                '\x08' => r"\b",
                '\x09' => r"\t",
                '\x0A' => r"\n",
                '\x0C' => r"\f",
                '\x0D' => r"\r",
                '\x22' => r#"\""#,
                '\x5C' => r"\\",
                // Otherwise any character less than \x1F gets escaped as
                // `\u00xx`
                '\0'..='\x1F' => {
                    buffer = format!(r"\u00{:02x}", c as u32);
                    &buffer
                }
                // And everything else doesn't get escaped
                _ => c.encode_utf8(&mut char_buffer),
            };
            // The serialized character will be wrapped in quotes.
            assert_eq!(unquoted_json_string, expected);
        }
    }
    // Values that are themselves maps are canonicalized recursively.
    #[test]
    fn nested_map() {
        let test = json!({
            "a": {"b": 1}
        });
        let json_string = to_string_canonical(&test, CanonicalizationOptions::strict()).unwrap();
        assert_eq!(json_string, r#"{"a":{"b":1}}"#);
    }
    // Canonical JSON rejects non-integer numbers outright.
    #[test]
    fn floats() {
        assert!(to_string_canonical(&100.0f32, CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&100.0f64, CanonicalizationOptions::strict()).is_err());
    }
    // All integer widths serialize in range; out-of-range values error in
    // strict mode.
    #[test]
    fn integers() {
        assert_eq!(
            to_string_canonical(&100u8, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100u16, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100u32, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100u64, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100u128, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100i8, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100i16, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100i32, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100i64, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert_eq!(
            to_string_canonical(&100i128, CanonicalizationOptions::strict()).unwrap(),
            "100"
        );
        assert!(to_string_canonical(&2u64.pow(60), CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&2u128.pow(60), CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&2i64.pow(60), CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&2i128.pow(60), CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&-(2i64.pow(60)), CanonicalizationOptions::strict()).is_err());
        assert!(to_string_canonical(&-(2i128.pow(60)), CanonicalizationOptions::strict()).is_err());
    }
    // Relaxed mode permits integers outside the canonical range.
    #[test]
    fn backwards_compatibility() {
        assert_eq!(
            to_string_canonical(&u64::MAX, CanonicalizationOptions::relaxed()).unwrap(),
            format!("{}", u64::MAX)
        );
        assert_eq!(
            to_string_canonical(&u128::MAX, CanonicalizationOptions::relaxed()).unwrap(),
            format!("{}", u128::MAX)
        );
        assert_eq!(
            to_string_canonical(&i128::MAX, CanonicalizationOptions::relaxed()).unwrap(),
            format!("{}", i128::MAX)
        );
        assert_eq!(
            to_string_canonical(&-i128::MAX, CanonicalizationOptions::relaxed()).unwrap(),
            format!("{}", -i128::MAX)
        );
    }
    // HashMap iteration order is arbitrary; the output must still be sorted
    // (bytewise, so "AA" sorts before "a").
    #[test]
    fn hashmap_order() {
        let mut test = HashMap::new();
        test.insert("e", 1);
        test.insert("d", 1);
        test.insert("c", 1);
        test.insert("b", 1);
        test.insert("a", 1);
        test.insert("AA", 1);
        let json_string = to_string_canonical(&test, CanonicalizationOptions::strict()).unwrap();
        assert_eq!(json_string, r#"{"AA":1,"a":1,"b":1,"c":1,"d":1,"e":1}"#);
    }
    // `RawValue` is rejected because its contents may not be canonical.
    #[test]
    fn raw_value() {
        let raw_value = RawValue::from_string("{}".to_string()).unwrap();
        assert!(to_string_canonical(&raw_value, CanonicalizationOptions::strict()).is_err());
    }
    // Duplicate keys (including different spellings of the same key) are
    // deduplicated with last-write-wins semantics.
    #[test]
    fn map_with_duplicate_keys() {
        let mut output = Vec::new();
        let mut serializer =
            CanonicalSerializer::new(&mut output, CanonicalizationOptions::strict());
        let mut map_serializer = serializer.serialize_map(None).unwrap();
        map_serializer.serialize_entry("a", &1).unwrap();
        map_serializer.serialize_entry("a", &2).unwrap();
        // Also try with different representations of the same key (e.g. `\t` and `\u{0009}`).
        map_serializer.serialize_entry("\t", &2).unwrap();
        map_serializer.serialize_entry("\u{0009}", &2).unwrap();
        SerializeMap::end(map_serializer).unwrap();
        assert_eq!(String::from_utf8(output).unwrap(), r#"{"\t":2,"a":2}"#);
    }
    // Keys inserted in reverse order come out sorted and correctly escaped.
    #[test]
    fn map_with_out_of_order_keys() {
        let mut output = Vec::new();
        let mut serializer =
            CanonicalSerializer::new(&mut output, CanonicalizationOptions::strict());
        let mut map_serializer = serializer.serialize_map(None).unwrap();
        // An ordered list of keys to insert, and the expected way they should be serialized.
        let ascii_order = [
            ('\0', r"\u0000"),
            ('\t', r"\t"),
            (' ', r" "),
            ('!', r"!"),
            ('"', r#"\""#),
            ('&', r"&"),
            ('A', r"A"),
            ('\\', r"\\"),
            ('a', r"a"),
            ('🍻', r"🍻"),
        ];
        // Double check that the keys are in the expected order.
        assert!(ascii_order.is_sorted_by_key(|(c, _)| u32::from(*c)));
        // Serialize the keys in the reverse order.
        for (c, _) in ascii_order.iter().rev() {
            map_serializer.serialize_entry(c.into(), &1).unwrap();
        }
        SerializeMap::end(map_serializer).unwrap();
        // The expected JSON should have the keys in the correct order, and the
        // correct escaping.
        let expected_json_inner = ascii_order
            .iter()
            .map(|(_, escaped)| format!(r#""{escaped}":1"#))
            .join(",");
        let expected_json = r"{".to_owned() + &expected_json_inner + r"}";
        assert_eq!(String::from_utf8(output).unwrap(), expected_json);
    }
}
+1
View File
@@ -5,6 +5,7 @@ use pyo3::prelude::*;
use pyo3_log::ResetHandle;
pub mod acl;
pub mod canonical_json;
pub mod duration;
pub mod errors;
pub mod events;
+4
View File
@@ -94,6 +94,10 @@ properties:
Determines whether we should freeze the internal dict object in
`FrozenEvent`. Freezing prevents bugs where we accidentally share e.g.
signature dicts. However, freezing a dict is expensive.
> ⚠️ **Warning** This option is known to introduce a new class of [comparison
bugs](https://github.com/element-hq/synapse/issues/18117) in Synapse.
default: false
examples:
- true
+3
View File
@@ -48,6 +48,9 @@ class ApiConfig(Config):
self, config: JsonDict
) -> Iterable[tuple[str, str | None]]:
"""Get the event types and state keys to include in the prejoin state."""
# MSC4311: the create event must always be included in invite/knock state.
yield EventTypes.Create, ""
room_prejoin_state_config = config.get("room_prejoin_state") or {}
# backwards-compatibility support for room_invite_state_types
-3
View File
@@ -386,9 +386,6 @@ class ExperimentalConfig(Config):
# MSC3814 (dehydrated devices with SSSS)
self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
# MSC3266 (room summary api)
self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
# MSC2409 (this setting only relates to optionally sending to-device messages).
# Presence, typing and read receipt EDUs are already sent to application services that
# have opted in to receive them. If enabled, this adds to-device messages to that list.
+4
View File
@@ -63,6 +63,10 @@ dict to frozen_dicts is expensive.
NOTE: This is overridden by the configuration by the Synapse worker apps, but
for the sake of tests, it is set here because it cannot be configured on the
homeserver object itself.
FIXME: Because of how this option works (changing the underlying types), it causes
subtle downstream bugs that makes type comparisons brittle, tracked by
https://github.com/element-hq/synapse/issues/18117
"""
T = TypeVar("T")
+51 -1
View File
@@ -20,6 +20,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
import copy
import logging
import random
from typing import (
@@ -568,9 +569,58 @@ class FederationServer(FederationBase):
origin=origin,
destination=self.server_name,
edu_type=edu_dict["edu_type"],
content=edu_dict["content"],
# Make a deep-copy as we mutate the content down below
content=copy.deepcopy(edu_dict["content"]),
)
try:
# Server ACL's apply to `EduTypes.TYPING` per MSC4163:
#
# > For typing notifications (m.typing), the room_id field inside
# > content should be checked, with the typing notification ignored if
# > the origin of the request is a server which is forbidden by the
# > room's ACL. Ignoring the typing notification means that the EDU
# > MUST be dropped upon receipt.
if edu.edu_type == EduTypes.TYPING:
origin_host, _ = parse_server_name(origin)
room_id = edu.content["room_id"]
try:
await self.check_server_matches_acl(origin_host, room_id)
except AuthError:
logger.warning(
"Ignoring typing EDU for room %s from banned server because of ACL's",
room_id,
)
return
# Server ACL's apply to `EduTypes.RECEIPT` per MSC4163:
#
# > For read receipts (m.receipt), all receipts inside a room_id
# > inside content should be ignored if the origin of the request is
# > forbidden by the room's ACL.
if edu.edu_type == EduTypes.RECEIPT:
origin_host, _ = parse_server_name(origin)
to_remove_room_ids = set()
for room_id in edu.content.keys():
try:
await self.check_server_matches_acl(origin_host, room_id)
except AuthError:
to_remove_room_ids.add(room_id)
if to_remove_room_ids:
logger.warning(
"Ignoring receipts in EDU for rooms %s from banned server %s because of ACL's",
to_remove_room_ids,
origin_host,
)
for room_id in to_remove_room_ids:
edu.content.pop(room_id)
if not edu.content:
# If we've removed all the rooms, we can just ignore the whole EDU
return
await self.registry.on_edu(edu.edu_type, origin, edu.content)
except Exception:
# If there was an error handling the EDU, we must reject the
+16 -4
View File
@@ -105,6 +105,12 @@ backfill_processing_before_timer = Histogram(
)
NUMBER_OF_EVENTS_TO_BACKFILL = 100
"""
The number of events we try to backfill from other servers in a single request.
"""
# TODO: We can refactor this away now that there is only one backfill point again
class _BackfillPointType(Enum):
# a regular backwards extremity (ie, an event which we don't yet have, but which
@@ -255,7 +261,9 @@ class FederationHandler:
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
for event_id, depth in await self.store.get_backfill_points_in_room(
room_id=room_id,
current_depth=current_depth,
# Per the docstring, it's best to pad the `current_depth` by the
# number of messages you plan to backfill from these points.
nearby_depth=current_depth + NUMBER_OF_EVENTS_TO_BACKFILL,
# We only need to end up with 5 extremities combined with the
# insertion event extremities to make the `/backfill` request
# but fetch an order of magnitude more to make sure there is
@@ -299,12 +307,13 @@ class FederationHandler:
# likely not to return anything relevant so we backfill in the background. The
# only way, this could return something relevant is if we discover a new branch
# of history that extends all the way back to where we are currently paginating
# and it's within the 100 events that are returned from `/backfill`.
# and it's within the `NUMBER_OF_EVENTS_TO_BACKFILL` events that are returned
# from `/backfill`.
if not sorted_backfill_points and current_depth != MAX_DEPTH:
# Check that we actually have later backfill points, if not just return.
have_later_backfill_points = await self.store.get_backfill_points_in_room(
room_id=room_id,
current_depth=MAX_DEPTH,
nearby_depth=MAX_DEPTH,
limit=1,
)
if not have_later_backfill_points:
@@ -464,7 +473,10 @@ class FederationHandler:
try:
await self._federation_event_handler.backfill(
dom, room_id, limit=100, extremities=extremities_to_request
dom,
room_id,
limit=NUMBER_OF_EVENTS_TO_BACKFILL,
extremities=extremities_to_request,
)
# If this succeeded then we probably already have the
# appropriate stuff.
-22
View File
@@ -406,28 +406,6 @@ class ProfileHandler:
# have it.
raise AuthError(400, "Cannot remove another user's profile")
if not by_admin:
current_profile = await self.store.get_profileinfo(target_user)
if not self.hs.config.registration.enable_set_displayname:
if current_profile.display_name:
# SUSPICIOUS: It seems strange to block deactivation on this,
# though this is preserving previous behaviour.
raise SynapseError(
400,
"Changing display name is disabled on this server",
Codes.FORBIDDEN,
)
if not self.hs.config.registration.enable_set_avatar_url:
if current_profile.avatar_url:
# SUSPICIOUS: It seems strange to block deactivation on this,
# though this is preserving previous behaviour.
raise SynapseError(
400,
"Changing avatar is disabled on this server",
Codes.FORBIDDEN,
)
await self.store.delete_profile(target_user)
await self._third_party_rules.on_profile_update(
+27 -11
View File
@@ -27,6 +27,7 @@ import math
import random
import string
from collections import OrderedDict
from collections.abc import Mapping
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
@@ -67,7 +68,11 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, event_exists_in_state_dag
from synapse.events.snapshot import UnpersistedEventContext
from synapse.events.utils import FilteredEvent, copy_and_fixup_power_levels_contents
from synapse.events.utils import (
FilteredEvent,
PowerLevelsContent,
copy_and_fixup_power_levels_contents,
)
from synapse.handlers.relations import BundledAggregations
from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams import EventSource
@@ -500,10 +505,12 @@ class RoomCreationHandler:
except AuthError as e:
logger.warning("Unable to update PLs in old room: %s", e)
power_levels_content: JsonMapping = old_room_pl_state.content
new_room_version = await self.store.get_room_version(new_room_id)
if new_room_version.msc4289_creator_power_enabled:
self._remove_creators_from_pl_users_map(
old_room_pl_state.content.get("users", {}),
power_levels_content = self._copy_and_remove_creators_from_pl_users_map(
power_levels_content,
requester.user.to_string(),
additional_creators,
)
@@ -515,9 +522,7 @@ class RoomCreationHandler:
"state_key": "",
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": copy_and_fixup_power_levels_contents(
old_room_pl_state.content
),
"content": copy_and_fixup_power_levels_contents(power_levels_content),
},
ratelimit=False,
)
@@ -686,11 +691,12 @@ class RoomCreationHandler:
if new_room_version.msc4289_creator_power_enabled:
# the creator(s) cannot be in the users map
self._remove_creators_from_pl_users_map(
user_power_levels,
fixed_power_levels = self._copy_and_remove_creators_from_pl_users_map(
power_levels,
user_id,
additional_creators,
)
initial_state[(EventTypes.PowerLevels, "")] = fixed_power_levels
# We construct a subset of what the body of a call to /createRoom would look like
# for passing to the spam checker. We don't include a preset here, as we expect the
@@ -1829,12 +1835,19 @@ class RoomCreationHandler:
)
return preset_name, preset_config
def _remove_creators_from_pl_users_map(
def _copy_and_remove_creators_from_pl_users_map(
self,
users_map: dict[str, int],
power_levels_content: PowerLevelsContent,
creator: str,
additional_creators: list[str] | None,
) -> None:
) -> PowerLevelsContent:
users_map = power_levels_content.get("users", {})
if not users_map:
return power_levels_content
assert isinstance(users_map, Mapping)
users_map = dict(users_map)
creators = [creator]
if additional_creators:
creators.extend(additional_creators)
@@ -1842,6 +1855,9 @@ class RoomCreationHandler:
# the creator(s) cannot be in the users map
users_map.pop(creator, None)
power_levels_content = {**power_levels_content, "users": users_map}
return power_levels_content
def _generate_room_id(self) -> str:
"""Generates a random room ID.
+28 -24
View File
@@ -167,34 +167,38 @@ class SlidingSyncHandler:
timeout_ms -= after_wait_ts - before_wait_ts
timeout_ms = max(timeout_ms, 0)
# We're going to respond immediately if the timeout is 0 or if this is an
# initial sync (without a `from_token`) so we can avoid calling
# `notifier.wait_for_events()`.
if timeout_ms == 0 or from_token is None:
now_token = self.event_sources.get_current_token()
result = await self.current_sync_for_user(
# Compute a response immediately. We always need to do this before
# waiting for new data (unlike in /v3/sync), as the request config might
# have changed (e.g. new room subscriptions, etc).
now_token = self.event_sources.get_current_token()
result = await self.current_sync_for_user(
sync_config,
from_token=from_token,
to_token=now_token,
)
# Return immediately if we have a result, the timeout is 0, or this is
# an initial sync.
if result or timeout_ms == 0 or from_token is None:
return result, did_wait
# Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback(
before_token: StreamToken, after_token: StreamToken
) -> SlidingSyncResult:
return await self.current_sync_for_user(
sync_config,
from_token=from_token,
to_token=now_token,
to_token=after_token,
)
else:
# Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback(
before_token: StreamToken, after_token: StreamToken
) -> SlidingSyncResult:
return await self.current_sync_for_user(
sync_config,
from_token=from_token,
to_token=after_token,
)
result = await self.notifier.wait_for_events(
sync_config.user.to_string(),
timeout_ms,
current_sync_callback,
from_token=from_token.stream_token,
)
did_wait = True
result = await self.notifier.wait_for_events(
sync_config.user.to_string(),
timeout_ms,
current_sync_callback,
from_token=now_token,
)
did_wait = True
return result, did_wait
+8 -4
View File
@@ -852,11 +852,15 @@ class SlidingSyncRoomLists:
previous_connection_state.room_configs.get(room_id)
)
if prev_room_sync_config is not None:
# Always include rooms whose timeline limit has increased.
# (see the "XXX: Odd behavior" described below)
# Always include rooms whose effective config has
# expanded. This covers timeline-limit increases and
# required-state additions introduced by room
# subscriptions overriding list-derived params.
if (
prev_room_sync_config.timeline_limit
< room_config.timeline_limit
prev_room_sync_config.combine_room_sync_config(
room_config
)
!= prev_room_sync_config
):
rooms_should_send.add(room_id)
continue
+10
View File
@@ -367,6 +367,7 @@ class RoomRestServlet(RestServlet):
self.store = hs.get_datastores().main
self.room_shutdown_handler = hs.get_room_shutdown_handler()
self.pagination_handler = hs.get_pagination_handler()
self._storage_controllers = hs.get_storage_controllers()
async def on_GET(
self, request: SynapseRequest, room_id: str
@@ -383,6 +384,15 @@ class RoomRestServlet(RestServlet):
members
)
result["forgotten"] = await self.store.is_locally_forgotten_room(room_id)
tombstone_event = await self._storage_controllers.state.get_current_state_event(
room_id,
EventTypes.Tombstone,
"",
)
result["tombstoned"] = tombstone_event is not None
result["replacement_room"] = (
tombstone_event.content.get("replacement_room") if tombstone_event else None
)
return HTTPStatus.OK, result
+5 -4
View File
@@ -1718,16 +1718,18 @@ class RoomHierarchyRestServlet(RestServlet):
class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
PATTERNS = (
# deprecated endpoint, to be removed
# deprecated unstable endpoint, to be removed
re.compile(
"^/_matrix/client/unstable/im.nheko.summary"
"/rooms/(?P<room_identifier>[^/]*)/summary$"
),
# recommended endpoint
# recommended unstable endpoint
re.compile(
"^/_matrix/client/unstable/im.nheko.summary"
"/summary/(?P<room_identifier>[^/]*)$"
),
# stable endpoint
re.compile("^/_matrix/client/v1/room_summary/(?P<room_identifier>[^/]*)$"),
)
CATEGORY = "Client API requests"
@@ -1775,8 +1777,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomTypingRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RoomHierarchyRestServlet(hs).register(http_server)
if hs.config.experimental.msc3266_enabled:
RoomSummaryRestServlet(hs).register(http_server)
RoomSummaryRestServlet(hs).register(http_server)
RoomEventServlet(hs).register(http_server)
JoinedRoomsRestServlet(hs).register(http_server)
RoomAliasListServlet(hs).register(http_server)
@@ -1199,34 +1199,72 @@ class EventFederationWorkerStore(
async def get_backfill_points_in_room(
self,
room_id: str,
current_depth: int,
nearby_depth: int,
limit: int,
) -> list[tuple[str, int]]:
"""
Get the backward extremities to backfill from in the room along with the
approximate depth.
Only returns events that are at a depth lower than or
equal to the `current_depth`. Sorted by depth, highest to lowest (descending)
so the closest events to the `current_depth` are first in the list.
Only returns events that are at a depth lower than or equal to the `nearby_depth`.
Sorted by depth, highest to lowest (descending) so the closest events to the
`nearby_depth` are first in the list.
We ignore extremities that are newer than the user's current scroll position
(ie, those with depth greater than `current_depth`) as:
1. we don't really care about getting events that have happened
after our current position; and
2. by the nature of paginating and scrolling back, we have likely
previously tried and failed to backfill from that extremity, so
to avoid getting "stuck" requesting the same backfill repeatedly
we drop those extremities.
### Why `nearby_depth`?
We find backfill points from the backward extremities in the DAG. Backward
extremities are the oldest events we know of in the room but we only know of
them because some other event referenced them by prev_event and aren't persisted
in our database yet (meaning we don't know their depth specifically). So we can
only do approximate depth comparisons (use the depth of the known events they're
connected to). And we don't know if those backward extremities point to a long
chain/fork of history that could stretch back far enough to be visible.
This means a naive homeserver implementation that looks for backward extremities <=
depth of the `/messages?dir=b&from=xxx` token may overlook a backfill point that could
reveal more history in the window the user is currently paginating in.
We consider "nearby" as anything within range of the number of events you plan
to backfill from the given backfill point. This is a good heuristic as since we
plan to backfill N events, the chain of events from a backfill point could
extend back into the visible window.
Example:
- Your pagination token represents a scroll position at a depth of `100`.
- We have a backfill point at an approximate depth of `125`
- You plan to backfill `50` events from that backfill point.
When we pad the token `depth` with the number of messages we plan to backfill,
`100` + `50` = `150`, we find the backfill point at `125` (because <= `150`, our
`nearby_depth`), backfill `50` events to a depth of `75` in the timeline
(exposing new events that we can return `100` -> `75`).
When we don't pad our token `depth`, `100` is lower than any of the backfill
points so we don't pick any and miss out on backfilling any events. Without
something like MSC3871 to indicate gaps in the timeline, clients will most
likely never know they are missing any events and never try to paginate again.
Generally though, we ignore extremities that are newer than the user's current
scroll position (ie, those with depth greater than `nearby_depth`) as:
1. we don't really care about getting events that have happened after our
current position; and
2. by the nature of paginating and scrolling back, we have likely previously
tried and failed to backfill from that extremity, so to avoid getting
"stuck" requesting the same backfill repeatedly we drop those
extremities. Although we also have `event_failed_pull_attempts` nowadays
to backoff as well.
Args:
room_id: Room where we want to find the oldest events
current_depth: The depth at the user's current scrollback position
nearby_depth: Typically, this is depth at the user's current scrollback
position + the number of events you plan to backfill from these backfill
points.
limit: The max number of backfill points to return
Returns:
List of (event_id, depth) tuples. Sorted by depth, highest to lowest
(descending) so the closest events to the `current_depth` are first
(descending) so the closest events to the `nearby_depth` are first
in the list.
"""
@@ -1234,12 +1272,12 @@ class EventFederationWorkerStore(
txn: LoggingTransaction, room_id: str
) -> list[tuple[str, int]]:
# Assemble a tuple lookup of event_id -> depth for the oldest events
# we know of in the room. Backwards extremeties are the oldest
# we know of in the room. Backwards extremities are the oldest
# events we know of in the room but we only know of them because
# some other event referenced them by prev_event and aren't
# persisted in our database yet (meaning we don't know their depth
# specifically). So we need to look for the approximate depth from
# the events connected to the current backwards extremeties.
# the events connected to the current backwards extremities.
if isinstance(self.database_engine, PostgresEngine):
least_function = "LEAST"
@@ -1259,7 +1297,7 @@ class EventFederationWorkerStore(
ON edge.event_id = event.event_id
/**
* We find the "oldest" events in the room by looking for
* events connected to backwards extremeties (oldest events
* events connected to backwards extremities (oldest events
* in the room that we know of so far).
*/
INNER JOIN event_backward_extremities AS backward_extrem
@@ -1285,16 +1323,19 @@ class EventFederationWorkerStore(
AND edge.is_state is FALSE
/**
* We only want backwards extremities that are older than or at
* the same position of the given `current_depth` (where older
* the same position of the given `nearby_depth` (where older
* means less than the given depth) because we're looking backwards
* from the `current_depth` when backfilling.
* from the `nearby_depth` when backfilling.
*
* current_depth (ignore events that come after this, ignore 2-4)
* Keep in mind that `event.depth` is an approximate depth of the
* backward extremity itself.
*
* nearby_depth (ignore events that come after this, ignore 2-4)
* |
*
* <oldest-in-time> [0]<--[1]<--[2]<--[3]<--[4] <newest-in-time>
*/
AND event.depth <= ? /* current_depth */
AND event.depth <= ? /* nearby_depth */
/**
* Exponential back-off (up to the upper bound) so we don't retry the
* same backfill point over and over. ex. 2hr, 4hr, 8hr, 16hr, etc.
@@ -1312,7 +1353,7 @@ class EventFederationWorkerStore(
)
)
/**
* Sort from highest (closest to the `current_depth`) to the lowest depth
* Sort from highest (closest to the `nearby_depth`) to the lowest depth
* because the closest are most relevant to backfill from first.
* Then tie-break on alphabetical order of the event_ids so we get a
* consistent ordering which is nice when asserting things in tests.
@@ -1325,7 +1366,7 @@ class EventFederationWorkerStore(
sql,
(
room_id,
current_depth,
nearby_depth,
self.clock.time_msec(),
BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
+24 -7
View File
@@ -5,7 +5,6 @@ import yaml
from synapse.config import ConfigError
from synapse.config._base import RootConfig
from synapse.config.api import ApiConfig
from synapse.types.state import StateFilter
DEFAULT_PREJOIN_STATE_PAIRS = {
("m.room.join_rules", ""),
@@ -38,7 +37,11 @@ room_prejoin_state:
disable_default_event_types: true
"""
)
self.assertEqual(config.room_prejoin_state, StateFilter.none())
# MSC4311: m.room.create is always included even when defaults are disabled
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("m.room.create", "")},
)
def test_event_without_state_key(self) -> None:
config = self.read_config(
@@ -50,7 +53,11 @@ room_prejoin_state:
"""
)
self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
self.assertEqual(config.room_prejoin_state.concrete_types(), [])
# MSC4311: m.room.create is always included
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("m.room.create", "")},
)
def test_event_with_specific_state_key(self) -> None:
config = self.read_config(
@@ -62,9 +69,10 @@ room_prejoin_state:
"""
)
self.assertFalse(config.room_prejoin_state.has_wildcards())
# MSC4311: m.room.create is always included
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("foo", "bar")},
{("foo", "bar"), ("m.room.create", "")},
)
def test_repeated_event_with_specific_state_key(self) -> None:
@@ -78,9 +86,10 @@ room_prejoin_state:
"""
)
self.assertFalse(config.room_prejoin_state.has_wildcards())
# MSC4311: m.room.create is always included
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("foo", "bar"), ("foo", "baz")},
{("foo", "bar"), ("foo", "baz"), ("m.room.create", "")},
)
def test_no_specific_state_key_overrides_specific_state_key(self) -> None:
@@ -94,7 +103,11 @@ room_prejoin_state:
"""
)
self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
self.assertEqual(config.room_prejoin_state.concrete_types(), [])
# MSC4311: m.room.create is always included
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("m.room.create", "")},
)
config = self.read_config(
"""
@@ -106,7 +119,11 @@ room_prejoin_state:
"""
)
self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
self.assertEqual(config.room_prejoin_state.concrete_types(), [])
# MSC4311: m.room.create is always included
self.assertEqual(
set(config.room_prejoin_state.concrete_types()),
{("m.room.create", "")},
)
def test_bad_event_type_entry_raises(self) -> None:
with self.assertRaises(ConfigError):
-117
View File
@@ -43,10 +43,8 @@ from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.storage.databases.main.events_worker import EventCacheEntry
from synapse.util.clock import Clock
from synapse.util.events import generate_fake_event_id
from tests import unittest
from tests.test_utils import event_injection
logger = logging.getLogger(__name__)
@@ -213,121 +211,6 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
self.assertEqual(sg, sg2)
def test_backfill_with_many_backward_extremities(self) -> None:
"""
Check that we can backfill with many backward extremities.
The goal is to make sure that when we only use a portion
of backwards extremities(the magic number is more than 5),
no errors are thrown.
Regression test, see https://github.com/matrix-org/synapse/pull/11027
"""
# create the room
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
room_version = self.get_success(self.store.get_room_version(room_id))
# we need a user on the remote server to be a member, so that we can send
# extremity-causing events.
remote_server_user_id = f"@user:{self.OTHER_SERVER_NAME}"
self.get_success(
event_injection.inject_member_event(
self.hs, room_id, remote_server_user_id, "join"
)
)
send_result = self.helper.send(room_id, "first message", tok=tok)
ev1 = self.get_success(
self.store.get_event(send_result["event_id"], allow_none=False)
)
current_state = self.get_success(
self.store.get_events_as_list(
(
self.get_success(self.store.get_partial_current_state_ids(room_id))
).values()
)
)
# Create "many" backward extremities. The magic number we're trying to
# create more than is 5 which corresponds to the number of backward
# extremities we slice off in `_maybe_backfill_inner`
federation_event_handler = self.hs.get_federation_event_handler()
auth_events = [
ev
for ev in current_state
if (ev.type, ev.state_key)
in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
]
for _ in range(8):
event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"origin_server_ts": 1,
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"body": "message connected to fake event",
},
"room_id": room_id,
"sender": remote_server_user_id,
"prev_events": [
ev1.event_id,
# We're creating an backward extremity each time thanks
# to this fake event
generate_fake_event_id(),
],
"auth_events": [ev.event_id for ev in auth_events],
"depth": ev1.depth + 1,
},
room_version,
),
room_version,
)
# we poke this directly into _process_received_pdu, to avoid the
# federation handler wanting to backfill the fake event.
state_handler = self.hs.get_state_handler()
context = self.get_success(
state_handler.compute_event_context(
event,
state_ids_before_event={
(e.type, e.state_key): e.event_id for e in current_state
},
partial_state=False,
)
)
self.get_success(
federation_event_handler._process_received_pdu(
self.OTHER_SERVER_NAME,
event,
context,
)
)
# we should now have 8 backwards extremities.
backwards_extremities = self.get_success(
self.store.db_pool.simple_select_list(
"event_backward_extremities",
keyvalues={"room_id": room_id},
retcols=["event_id"],
)
)
self.assertEqual(len(backwards_extremities), 8)
current_depth = 1
limit = 100
# Make sure backfill still works
self.get_success(
self.hs.get_federation_handler().maybe_backfill(
room_id,
current_depth,
limit,
)
)
def test_backfill_ignores_known_events(self) -> None:
"""
Tests that events that we already know about are ignored when backfilling.
+3 -4
View File
@@ -38,6 +38,7 @@ from synapse.util.duration import Duration
from synapse.util.task_scheduler import TaskStatus
from tests import unittest
from tests.unittest import override_config
class ProfileTestCase(unittest.HomeserverTestCase):
@@ -314,9 +315,8 @@ class ProfileTestCase(unittest.HomeserverTestCase):
membership[state_tuple].content["displayname"], "Frank Jr."
)
@override_config({"enable_set_displayname": False})
def test_set_my_name_if_disabled(self) -> None:
self.hs.config.registration.enable_set_displayname = False
# Setting displayname for the first time is allowed
self.get_success(self.store.set_profile_displayname(self.frank, "Frank"))
@@ -435,9 +435,8 @@ class ProfileTestCase(unittest.HomeserverTestCase):
(self.get_success(self.store.get_profile_avatar_url(self.frank))),
)
@override_config({"enable_set_avatar_url": False})
def test_set_my_avatar_if_disabled(self) -> None:
self.hs.config.registration.enable_set_avatar_url = False
# Setting displayname for the first time is allowed
self.get_success(
self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
+4
View File
@@ -2311,10 +2311,14 @@ class RoomTestCase(unittest.HomeserverTestCase):
self.assertIn("state_events", channel.json_body)
self.assertIn("room_type", channel.json_body)
self.assertIn("forgotten", channel.json_body)
self.assertIn("tombstoned", channel.json_body)
self.assertIn("replacement_room", channel.json_body)
self.assertEqual(room_id_1, channel.json_body["room_id"])
self.assertIs(True, channel.json_body["federatable"])
self.assertIs(True, channel.json_body["public"])
self.assertIs(False, channel.json_body["tombstoned"])
self.assertIs(None, channel.json_body["replacement_room"])
def test_single_room_devices(self) -> None:
"""Test that `joined_local_devices` can be requested correctly"""
@@ -22,6 +22,7 @@ import synapse.rest.admin
from synapse.api.constants import EventTypes, HistoryVisibility
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util.clock import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
@@ -126,6 +127,124 @@ class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase):
response_body["rooms"][room_id1],
)
def test_room_subscription_required_state_expansion_returns_immediately(
self,
) -> None:
"""
Test that adding a room subscription with stronger params than the list causes an
incremental long-poll to return immediately, even without new stream activity.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
sync_body: JsonDict = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": 0,
}
},
"conn_id": "conn_id",
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
sync_body["room_subscriptions"] = {
room_id1: {
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 0,
}
}
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
room_response = channel.json_body["rooms"][room_id1]
self.assertNotIn("initial", room_response)
self._assertRequiredStateIncludes(
room_response["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
def test_room_subscription_required_state_change_returns_immediately(self) -> None:
"""
Test that expanding an existing room subscription's required state causes an
incremental long-poll to return immediately, even without new stream activity.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(
user1_id, tok=user1_tok, extra_content={"name": "Foo"}
)
sync_body: JsonDict = {
"room_subscriptions": {
room_id1: {
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 0,
}
},
"conn_id": "conn_id",
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
sync_body["room_subscriptions"][room_id1]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
room_response = channel.json_body["rooms"][room_id1]
self.assertNotIn("initial", room_response)
self._assertRequiredStateIncludes(
room_response["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
def test_room_subscriptions_with_leave_membership(self) -> None:
"""
Test `room_subscriptions` with a leave room should give us timeline and state
+130 -4
View File
@@ -31,14 +31,14 @@ from twisted.internet.testing import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import LoginType, Membership
from synapse.api.errors import Codes, HttpResponseException
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.appservice import ApplicationService
from synapse.rest import admin
from synapse.rest.client import account, login, register, room
from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource
from synapse.server import HomeServer
from synapse.storage._base import db_to_json
from synapse.types import JsonDict, UserID
from synapse.types import JsonDict, UserID, create_requester
from synapse.util.clock import Clock
from tests import unittest
@@ -500,6 +500,123 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
channel = self.make_request("GET", "account/whoami", access_token=tok)
self.assertEqual(channel.code, 401)
def test_deactivate_erase_account(self) -> None:
"""
Test that a user account can be signaled for erasure on the Matrix spec endpoint
for client access, `/account/deactivate` and that profile data is erased as part
of the process
"""
mxid = self.register_user("kermit", "test")
user_id = UserID.from_string(mxid)
tok = self.login("kermit", "test")
profile_handler = self.hs.get_profile_handler()
# Set some profile data that can be checked for after the user is erased
self.get_success(
profile_handler.set_displayname(
user_id, create_requester(user_id), "Kermit the Frog"
)
)
self.get_success(
profile_handler.set_avatar_url(
user_id, create_requester(user_id), "http://test/Kermit.jpg"
)
)
# Verify it is set
self.assertEqual(
self.get_success(profile_handler.get_displayname(user_id)),
"Kermit the Frog",
)
self.assertEqual(
self.get_success(profile_handler.get_avatar_url(user_id)),
"http://test/Kermit.jpg",
)
# Deactivate!
self.deactivate(mxid, tok, erase=True)
store = self.hs.get_datastores().main
# Check that the user has been marked as deactivated.
self.assertTrue(self.get_success(store.get_user_deactivated_status(mxid)))
# On deactivation with 'erase', the entire database row is erased. Both of these
# should raise a 404(Not Found) SynapseError
display_name_failure = self.get_failure(
profile_handler.get_displayname(user_id), SynapseError
)
assert display_name_failure.value.code == HTTPStatus.NOT_FOUND
avatar_url_failure = self.get_failure(
profile_handler.get_avatar_url(user_id), SynapseError
)
assert avatar_url_failure.value.code == HTTPStatus.NOT_FOUND
# Check that this access token has been invalidated.
channel = self.make_request("GET", "account/whoami", access_token=tok)
self.assertEqual(channel.code, 401)
@override_config({"enable_set_displayname": False, "enable_set_avatar_url": False})
def test_deactivate_erase_account_with_disabled_profile_changes(self) -> None:
"""
Test that deactivating the user with the 'erase' option will remove existing
profile data, even with the Synapse configuration to forbid profile changes
"""
mxid = self.register_user("kermit", "test")
user_id = UserID.from_string(mxid)
tok = self.login("kermit", "test")
profile_handler = self.hs.get_profile_handler()
# Can not use the profile handler to set a display name when it is disabled. Use
# the database directly
store = self.hs.get_datastores().main
self.get_success(store.set_profile_displayname(user_id, "Kermit the Frog"))
self.get_success(
store.set_profile_avatar_url(user_id, "http://test/Kermit.jpg")
)
# Verify it is set
self.assertEqual(
(self.get_success(store.get_profile_displayname(user_id))),
"Kermit the Frog",
)
self.assertEqual(
self.get_success(profile_handler.get_displayname(user_id)),
"Kermit the Frog",
)
self.assertEqual(
(self.get_success(store.get_profile_avatar_url(user_id))),
"http://test/Kermit.jpg",
)
self.assertEqual(
self.get_success(profile_handler.get_avatar_url(user_id)),
"http://test/Kermit.jpg",
)
# Deactivate!
self.deactivate(mxid, tok, erase=True)
# Check that the user has been marked as deactivated.
self.assertTrue(self.get_success(store.get_user_deactivated_status(mxid)))
# On deactivation with 'erase', the entire database row is erased. Both of these
# should raise a 404(Not Found) SynapseError
display_name_failure = self.get_failure(
profile_handler.get_displayname(user_id), SynapseError
)
assert display_name_failure.value.code == HTTPStatus.NOT_FOUND
avatar_url_failure = self.get_failure(
profile_handler.get_avatar_url(user_id), SynapseError
)
assert avatar_url_failure.value.code == HTTPStatus.NOT_FOUND
# Check that this access token has been invalidated.
channel = self.make_request("GET", "account/whoami", access_token=tok)
self.assertEqual(channel.code, 401)
def test_pending_invites(self) -> None:
"""Tests that deactivating a user rejects every pending invite for them."""
store = self.hs.get_datastores().main
@@ -698,14 +815,23 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(len(res2), 4)
def deactivate(self, user_id: str, tok: str) -> None:
def deactivate(self, user_id: str, tok: str, erase: bool = False) -> None:
"""
Helper to deactivate a user using the /account/deactivate endpoint, optionally
with erasure
Args:
user_id: the string formatted mxid(not a UserID)
tok: the user's access token
erase: bool of if this should be a full erasure request
"""
request_data = {
"auth": {
"type": "m.login.password",
"user": user_id,
"password": "test",
},
"erase": False,
"erase": erase,
}
channel = self.make_request(
"POST", "account/deactivate", request_data, access_token=tok
+64
View File
@@ -29,6 +29,7 @@ import synapse.rest.admin
from synapse.api.constants import (
EventContentFields,
EventTypes,
JoinRules,
ReceiptTypes,
RelationTypes,
)
@@ -394,6 +395,69 @@ class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin):
)
class SyncCreateEventInPrejoinStateTestCase(unittest.HomeserverTestCase):
    """MSC4311: Tests that m.room.create is present in invite_state and knock_state.

    Invited and knocking users only see stripped state for the room; these tests
    assert that the stripped state handed back by `/sync` always includes the
    `m.room.create` event so clients can determine the room version/creator.
    """

    # NOTE: the previous no-op `default_config` override (which only returned
    # `super().default_config()` unchanged) has been removed as dead code.
    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        knock.register_servlets,
    ]

    def test_create_event_present_in_invite_state(self) -> None:
        """m.room.create must appear in invite_state."""
        inviter = self.register_user("inviter", "pass")
        inviter_tok = self.login("inviter", "pass")
        invitee = self.register_user("invitee", "pass")
        invitee_tok = self.login("invitee", "pass")

        # Create a room and invite the second user into it.
        room_id = self.helper.create_room_as(inviter, tok=inviter_tok)
        self.helper.invite(room=room_id, src=inviter, targ=invitee, tok=inviter_tok)

        # Sync as the invitee and inspect the stripped invite_state events.
        channel = self.make_request("GET", "/sync", access_token=invitee_tok)
        self.assertEqual(channel.code, 200, channel.json_body)
        invite_state_events = channel.json_body["rooms"]["invite"][room_id][
            "invite_state"
        ]["events"]
        event_types = {stripped_event["type"] for stripped_event in invite_state_events}
        self.assertIn(EventTypes.Create, event_types)

    def test_create_event_present_in_knock_state(self) -> None:
        """m.room.create must appear in knock_state."""
        host = self.register_user("host", "pass")
        host_tok = self.login("host", "pass")
        knocker = self.register_user("knocker", "pass")
        knocker_tok = self.login("knocker", "pass")

        # Knocking requires a private room on a room version that supports it
        # (v7 introduced knocking) with the knock join rule set.
        room_id = self.helper.create_room_as(
            host, is_public=False, room_version="7", tok=host_tok
        )
        self.helper.send_state(
            room_id,
            EventTypes.JoinRules,
            {"join_rule": JoinRules.KNOCK},
            tok=host_tok,
        )
        self.helper.knock(room_id, knocker, tok=knocker_tok)

        # Sync as the knocker and inspect the stripped knock_state events.
        channel = self.make_request("GET", "/sync", access_token=knocker_tok)
        self.assertEqual(channel.code, 200, channel.json_body)
        knock_state_events = channel.json_body["rooms"]["knock"][room_id][
            "knock_state"
        ]["events"]
        event_types = {stripped_event["type"] for stripped_event in knock_state_events}
        self.assertIn(EventTypes.Create, event_types)
class UnreadMessagesTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
+51 -1
View File
@@ -23,6 +23,7 @@ from unittest.mock import patch
from twisted.internet.testing import MemoryReactor
from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
from synapse.api.room_versions import RoomVersions
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.rest import admin
from synapse.rest.client import login, room, room_upgrade_rest_servlet
@@ -58,6 +59,7 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
token: str | None = None,
room_id: str | None = None,
expire_cache: bool = True,
new_version: str = DEFAULT_ROOM_VERSION,
) -> FakeChannel:
if expire_cache:
# We don't want a cached response.
@@ -70,7 +72,7 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
"POST",
f"/_matrix/client/r0/rooms/{room_id}/upgrade",
# This will upgrade a room to the same version, but that's fine.
content={"new_version": DEFAULT_ROOM_VERSION},
content={"new_version": new_version},
access_token=token or self.creator_token,
)
@@ -431,3 +433,51 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
tok=self.creator_token,
)
self.assertEqual(content[EventContentFields.MEMBERSHIP], Membership.BAN)
def test_creator_removed_from_powerlevels_v12(self) -> None:
    """
    Test that the creator is removed from the power levels users map when
    upgrading to a room version with MSC4289.
    """
    # Start from room version 11, which predates MSC4289, so the creator
    # appears in the power-levels users map as usual.
    room_id = self.helper.create_room_as(
        self.creator, tok=self.creator_token, room_version="11"
    )
    self.helper.join(room_id, self.other, tok=self.other_token)

    # Grab the pre-upgrade power-levels event straight from state storage.
    original_pl_event = self.get_success(
        self.hs.get_storage_controllers().state.get_current_state_event(
            room_id, "m.room.power_levels", ""
        )
    )
    assert original_pl_event is not None
    # Sanity check: the creator holds power level 100 before the upgrade.
    self.assertEqual(original_pl_event.content["users"][self.creator], 100)

    # Perform the upgrade to v12 (MSC4289) and confirm it succeeded.
    channel = self._upgrade_room(room_id=room_id, new_version=RoomVersions.V12.identifier)
    self.assertEqual(200, channel.code, channel.result)
    replacement_room_id = channel.json_body["replacement_room"]

    # Read back the power levels of the replacement room via the client API.
    upgraded_pl_content = self.helper.get_state(
        replacement_room_id,
        "m.room.power_levels",
        tok=self.creator_token,
    )
    # Under MSC4289 the creator must not be listed in the users map.
    self.assertNotIn(self.creator, upgraded_pl_content["users"])

    # The old event must be untouched: the creator still has power level 100.
    #
    # This is a regression test where previously Synapse would accidentally
    # mutate the old power levels event.
    self.assertEqual(original_pl_event.content["users"][self.creator], 100)