diff --git a/.ci/complement_package.gotpl b/.ci/complement_package.gotpl index 2dd5e5e0e6..e71ed9f616 100644 --- a/.ci/complement_package.gotpl +++ b/.ci/complement_package.gotpl @@ -29,7 +29,7 @@ which is under the Unlicense licence. {{- with .TestCases -}} {{- /* Passing tests are first */ -}} {{- range . -}} - {{- if eq .Result "PASS" -}} + {{- if and (eq .Result "PASS") (not $settings.HideSuccessfulTests) -}} ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}} {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} {{- with .Coverage -}} @@ -49,7 +49,8 @@ which is under the Unlicense licence. {{- /* Then skipped tests are second */ -}} {{- range . -}} - {{- if eq .Result "SKIP" -}} + {{- /* Skipped tests are also purposefully hidden if "HideSuccessfulTests" is enabled */ -}} + {{- if and (eq .Result "SKIP") (not $settings.HideSuccessfulTests) -}} ::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}} {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} {{- with .Coverage -}} diff --git a/.ci/scripts/gotestfmt b/.ci/scripts/gotestfmt deleted file mode 100755 index 83e0ec6361..0000000000 --- a/.ci/scripts/gotestfmt +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# -# wraps `gotestfmt`, hiding output from successful packages unless -# all tests passed. - -set -o pipefail -set -e - -# tee the test results to a log, whilst also piping them into gotestfmt, -# telling it to hide successful results, so that we can clearly see -# unsuccessful results. -tee complement.log | gotestfmt -hide successful-packages - -# gotestfmt will exit non-zero if there were any failures, so if we got to this -# point, we must have had a successful result. -echo "All tests successful; showing all test results" - -# Pipe the test results back through gotestfmt, showing all results. -# The log file consists of JSON lines giving the test results, interspersed -# with regular stdout lines (including reports of downloaded packages). 
-grep '^{"Time":' complement.log | gotestfmt diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh index 47a3ff8e69..1fdaf5e631 100755 --- a/.ci/scripts/setup_complement_prerequisites.sh +++ b/.ci/scripts/setup_complement_prerequisites.sh @@ -10,7 +10,6 @@ alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; fu alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func' block Install Complement Dependencies - sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest endblock diff --git a/.github/workflows/complement_tests.yml b/.github/workflows/complement_tests.yml index abbd7c8fca..064b68b9fc 100644 --- a/.github/workflows/complement_tests.yml +++ b/.github/workflows/complement_tests.yml @@ -107,6 +107,12 @@ jobs: - name: Formatted sanity check Complement test logs # Always run this step if we attempted to run the Complement tests. if: always() && steps.run_sanity_check_complement_image_test.outcome != 'skipped' + # We do not hide successful tests in `gotestfmt` here as the list of sanity + # check tests is so short. Feel free to change this when we get more tests. + # + # Note that the `-hide` argument is interpreted by `gotestfmt`. From it, + # it derives several values under `$settings` and passes them to our + # custom `.ci/complement_package.gotpl` template to render the output. run: cat /tmp/gotest-sanity-check-complement.log | gotestfmt -hide "successful-downloads,empty-packages" - name: Run Complement Tests @@ -129,10 +135,15 @@ jobs: TEST_ONLY_IGNORE_POETRY_LOCKFILE: ${{ inputs.use_latest_deps && 1 || '' }} TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: ${{ inputs.use_twisted_trunk && 1 || '' }} - - name: Formatted Complement test logs + - name: Formatted Complement test logs (only failing are shown) # Always run this step if we attempted to run the Complement tests. 
if: always() && steps.run_complement_tests.outcome != 'skipped' - run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,empty-packages" + # Hide successful tests in order to reduce the verbosity of the otherwise very large output. + # + # Note that the `-hide` argument is interpreted by `gotestfmt`. From it, + # it derives several values under `$settings` and passes them to our + # custom `.ci/complement_package.gotpl` template to render the output. + run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages" - name: Run in-repo Complement Tests id: run_in_repo_complement_tests @@ -154,7 +165,12 @@ jobs: TEST_ONLY_IGNORE_POETRY_LOCKFILE: ${{ inputs.use_latest_deps && 1 || '' }} TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: ${{ inputs.use_twisted_trunk && 1 || '' }} - - name: Formatted in-repo Complement test logs + - name: Formatted in-repo Complement test logs (only failing are shown) # Always run this step if we attempted to run the Complement tests. if: always() && steps.run_in_repo_complement_tests.outcome != 'skipped' - run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,empty-packages" + # Hide successful tests in order to reduce the verbosity of the otherwise very large output. + # + # Note that the `-hide` argument is interpreted by `gotestfmt`. From it, + # it derives several values under `$settings` and passes them to our + # custom `.ci/complement_package.gotpl` template to render the output. 
+ run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages" diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a796f9cf2a..d581b5ec56 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 @@ -41,13 +41,13 @@ jobs: echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV - name: Log in to DockerHub - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Log in to GHCR - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -79,7 +79,7 @@ jobs: services/backend-repositories/secret/data/oci.element.io password | OCI_PASSWORD ; - name: Login to Element OCI Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: oci-push.vpn.infra.element.io username: ${{ steps.import-secrets.outputs.OCI_USERNAME }} @@ -87,7 +87,7 @@ jobs: - name: Build and push by digest id: build - uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2 + uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 with: push: true labels: | @@ -136,14 +136,14 @@ jobs: 
merge-multiple: true - name: Log in to DockerHub - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 if: ${{ startsWith(matrix.repository, 'docker.io') }} with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Log in to GHCR - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 if: ${{ startsWith(matrix.repository, 'ghcr.io') }} with: registry: ghcr.io @@ -176,20 +176,20 @@ jobs: services/backend-repositories/secret/data/oci.element.io password | OCI_PASSWORD ; - name: Login to Element OCI Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: oci-push.vpn.infra.element.io username: ${{ steps.import-secrets.outputs.OCI_USERNAME }} password: ${{ steps.import-secrets.outputs.OCI_PASSWORD }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Install Cosign uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - name: Calculate docker image tag - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0 with: images: ${{ matrix.repository }} flavor: | diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index 12599457dc..3931dbebb3 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -52,14 +52,14 @@ jobs: with: poetry-version: "2.2.1" - name: Login to registry - uses: 
docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 125d659f4e..95d3d98d75 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -61,7 +61,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Set up docker layer caching uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 diff --git a/CHANGES.md b/CHANGES.md index e7c2416238..6dbee41e7c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.150.0 (2026-03-24) + +No significant changes since 1.150.0rc1. 
+ + + + # Synapse 1.150.0rc1 (2026-03-17) ## Features diff --git a/Cargo.lock b/Cargo.lock index d6945bfbb7..a004747af5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1116,9 +1116,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "ring", "rustls-pki-types", @@ -1410,9 +1410,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.49.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" dependencies = [ "bytes", "libc", diff --git a/changelog.d/19509.bugfix b/changelog.d/19509.bugfix new file mode 100644 index 0000000000..4c98f1f4ee --- /dev/null +++ b/changelog.d/19509.bugfix @@ -0,0 +1 @@ +Allow Synapse to start up even when discovery fails for an OpenID Connect provider. \ No newline at end of file diff --git a/changelog.d/19538.misc b/changelog.d/19538.misc new file mode 100644 index 0000000000..56d3d2744d --- /dev/null +++ b/changelog.d/19538.misc @@ -0,0 +1 @@ +Fix small comment typo in config output from the `demo/start.sh` script. diff --git a/changelog.d/19581.misc b/changelog.d/19581.misc new file mode 100644 index 0000000000..02856f3497 --- /dev/null +++ b/changelog.d/19581.misc @@ -0,0 +1 @@ +Remove `redacted_because` from internal unsigned. diff --git a/changelog.d/19589.misc b/changelog.d/19589.misc new file mode 100644 index 0000000000..f3c15326c6 --- /dev/null +++ b/changelog.d/19589.misc @@ -0,0 +1 @@ +Port `RoomVersion` to Rust. 
diff --git a/changelog.d/19590.misc b/changelog.d/19590.misc new file mode 100644 index 0000000000..b5357334ce --- /dev/null +++ b/changelog.d/19590.misc @@ -0,0 +1 @@ +Only show failing Complement tests in the formatted output in CI. \ No newline at end of file diff --git a/changelog.d/19602.misc.1 b/changelog.d/19602.misc.1 new file mode 100644 index 0000000000..222f26980d --- /dev/null +++ b/changelog.d/19602.misc.1 @@ -0,0 +1 @@ +Update `HomeserverTestCase.pump()` docstring to demystify behavior (Twisted reactor/clock). diff --git a/changelog.d/19602.misc.2 b/changelog.d/19602.misc.2 new file mode 100644 index 0000000000..4cadad653c --- /dev/null +++ b/changelog.d/19602.misc.2 @@ -0,0 +1 @@ +Deprecate `HomeserverTestCase.pump()` in favor of more direct `HomeserverTestCase.reactor.advance(...)` usage. diff --git a/complement/go.mod b/complement/go.mod index 2a9adb9b95..716aabdf32 100644 --- a/complement/go.mod +++ b/complement/go.mod @@ -10,7 +10,7 @@ require ( ) require ( - github.com/docker/docker v28.3.3+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible github.com/docker/go-connections v0.5.0 // indirect github.com/hashicorp/go-set/v3 v3.0.0 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect diff --git a/complement/tests/internal/dockerutil/files.go b/complement/tests/internal/dockerutil/files.go new file mode 100644 index 0000000000..62d2c557af --- /dev/null +++ b/complement/tests/internal/dockerutil/files.go @@ -0,0 +1,54 @@ +package dockerutil + +import ( + "archive/tar" + "bytes" + "fmt" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" +) + +// Write `data` as a file into a container at the given `path`. +// +// Internally, produces an uncompressed single-file tape archive (tar) that is sent to the docker +// daemon to be unpacked into the container filesystem. 
+func WriteFileIntoContainer(t *testing.T, docker *client.Client, containerID string, path string, data []byte) error { + // Create a fake/virtual tar file in memory that we can copy to the container + // via https://stackoverflow.com/a/52131297/796832 + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + err := tw.WriteHeader(&tar.Header{ + Name: path, + Mode: 0777, + Size: int64(len(data)), + }) + if err != nil { + return fmt.Errorf("WriteIntoContainer: failed to write tarball header for %s: %v", path, err) + } + _, err = tw.Write([]byte(data)) + if err != nil { + return fmt.Errorf("WriteIntoContainer: failed to write tarball data for %s: %w", path, err) + } + + err = tw.Close() + if err != nil { + return fmt.Errorf("WriteIntoContainer: failed to close tarball writer for %s: %w", path, err) + } + + // Put our new fake file in the container volume + err = docker.CopyToContainer( + t.Context(), + containerID, + "/", + &buf, + container.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }, + ) + if err != nil { + return fmt.Errorf("WriteIntoContainer: failed to copy: %s", err) + } + return nil +} diff --git a/complement/tests/oidc_test.go b/complement/tests/oidc_test.go new file mode 100644 index 0000000000..bddb382058 --- /dev/null +++ b/complement/tests/oidc_test.go @@ -0,0 +1,117 @@ +// This file is licensed under the Affero General Public License (AGPL) version 3. +// +// Copyright (C) 2026 Element Creations Ltd +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// See the GNU Affero General Public License for more details: +// . 
+ +package synapse_tests + +import ( + "net/http" + "net/url" + "strings" + "testing" + + dockerClient "github.com/docker/docker/client" + "github.com/element-hq/synapse/tests/internal/dockerutil" + "github.com/matrix-org/complement" + "github.com/matrix-org/complement/client" + "github.com/matrix-org/complement/match" + "github.com/matrix-org/complement/must" +) + +const OIDC_HOMESERVER_CONFIG string = ` +oidc_providers: + - idp_id: "test_provider" + idp_name: "Test OIDC Provider" + issuer: "https://example.invalid" + client_id: "test_client_id" + client_secret: "test_secret" + scopes: ["openid"] + discover: true + user_mapping_provider: + module: "synapse.handlers.oidc.JinjaOidcMappingProvider" + config: + display_name_template: "{{ user.given_name }}" + email_template: "{{ user.email }}" +` + +// Test that Synapse still starts up when configured with an OIDC provider that is unavailable. +// +// This is a regression test: Synapse previously would fail to start up +// at all if the OIDC provider was down on startup. +// https://github.com/element-hq/synapse/issues/8088 +// +// Now instead of failing to start, Synapse will produce a 503 response on the +// `/_matrix/client/v3/login/sso/redirect/oidc-test_provider` endpoint. 
+func TestOIDCProviderUnavailable(t *testing.T) { + // Deploy a single homeserver + deployment := complement.Deploy(t, 1) + defer deployment.Destroy(t) + + // Get Docker client to manipulate container + dc, err := dockerClient.NewClientWithOpts( + dockerClient.FromEnv, + dockerClient.WithAPIVersionNegotiation(), + ) + must.NotError(t, "failed creating docker client", err) + + // Configure the OIDC Provider by writing a config fragment + err = dockerutil.WriteFileIntoContainer( + t, + dc, + deployment.ContainerID(t, "hs1"), + "/conf/homeserver.d/oidc_provider.yaml", + []byte(OIDC_HOMESERVER_CONFIG), + ) + if err != nil { + t.Fatalf("Failed to write updated config to container: %v", err) + } + + // Restart the homeserver to apply the new config + deployment.StopServer(t, "hs1") + // Careful: port number changes here + deployment.StartServer(t, "hs1") + // Must get after the restart so the port number is correct + unauthedClient := deployment.UnauthenticatedClient(t, "hs1") + + // Test that trying to log in with an OIDC provider that is down + // causes an HTML error page to be shown to the user. + // (This replaces the redirect that would happen if the provider was + // up.) + // + // More importantly, implicitly tests that Synapse can start up + // and answer requests even though an OIDC provider is down. + t.Run("/login/sso/redirect shows HTML error", func(t *testing.T) { + // Build a request to the /redirect/ endpoint, that would normally be navigated to + // by the user's browser in order to start the login flow. 
+ queryParams := url.Values{} + queryParams.Add("redirectUrl", "http://redirect.invalid/redirect") + res := unauthedClient.Do(t, "GET", []string{"_matrix", "client", "v3", "login", "sso", "redirect", "oidc-test_provider"}, + client.WithQueries(queryParams), + ) + + body := must.MatchResponse(t, res, match.HTTPResponse{ + // Should get a 503 + StatusCode: http.StatusServiceUnavailable, + + Headers: map[string]string{ + // Should get an HTML page explaining the problem to the user + "Content-Type": "text/html; charset=utf-8", + }, + }) + + bodyText := string(body) + + // The HTML page contains phrases from the template we expect + if !strings.Contains(bodyText, "login provider is unavailable right now") { + t.Fatalf("Keyword not found in HTML error page, got %s", bodyText) + } + }) +} diff --git a/debian/changelog b/debian/changelog index 55af4c22b5..c8146764de 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.150.0) stable; urgency=medium + + * New synapse release 1.150.0. + + -- Synapse Packaging team Tue, 24 Mar 2026 14:17:04 +0000 + matrix-synapse-py3 (1.150.0~rc1) stable; urgency=medium [ Quentin Gliech ] diff --git a/demo/start.sh b/demo/start.sh index 0b61ac9991..471e881f46 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -49,7 +49,7 @@ for port in 8080 8081 8082; do # Please don't accidentally bork me with your fancy settings. 
listeners=$(cat <<-PORTLISTENERS # Configure server to listen on both $https_port and $port - # This overides some of the default settings above + # This overrides some of the default settings above listeners: - port: $https_port type: http diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index da1b26a283..e0d30abed3 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -128,6 +128,10 @@ openssl x509 -req -in /conf/server.tls.csr \ export SYNAPSE_TLS_CERT=/conf/server.tls.crt export SYNAPSE_TLS_KEY=/conf/server.tls.key +# Add a directory for tests to add config overrides if they want +mkdir --parents /conf/homeserver.d +export _SYNAPSE_COMPLEMENT_EXTRA_CONFIG_DIR=/conf/homeserver.d + # Run the script that writes the necessary config files and starts supervisord, which in turn # starts everything else exec /configure_workers_and_start.py "$@" diff --git a/docker/conf-workers/synapse.supervisord.conf.j2 b/docker/conf-workers/synapse.supervisord.conf.j2 index 4fb11b259e..9388b1ff0e 100644 --- a/docker/conf-workers/synapse.supervisord.conf.j2 +++ b/docker/conf-workers/synapse.supervisord.conf.j2 @@ -5,10 +5,16 @@ command=/usr/local/bin/python -m synapse.app.complement_fork_starter {{ main_config_path }} synapse.app.homeserver --config-path="{{ main_config_path }}" + {%- if extra_config_dir %} + --config-path="{{ extra_config_dir }}" + {%- endif %} --config-path=/conf/workers/shared.yaml {%- for worker in workers %} -- {{ worker.app }} --config-path="{{ main_config_path }}" + {%- if extra_config_dir %} + --config-path="{{ extra_config_dir }}" + {%- endif %} --config-path=/conf/workers/shared.yaml --config-path=/conf/workers/{{ worker.name }}.yaml {%- endfor %} @@ -24,6 +30,9 @@ exitcodes=0 environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s" command=/usr/local/bin/prefix-log 
/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" + {%- if extra_config_dir %} + --config-path="{{ extra_config_dir }}" + {%- endif %} --config-path=/conf/workers/shared.yaml priority=10 # Log startup failures to supervisord's stdout/err diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index ed15e8efef..1b8d4f9989 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -856,6 +856,7 @@ def parse_worker_types( def generate_worker_files( environ: Mapping[str, str], config_path: str, + extra_config_dir: str | None, data_dir: str, requested_workers: list[Worker], ) -> None: @@ -865,6 +866,7 @@ def generate_worker_files( Args: environ: os.environ instance. config_path: The location of the generated Synapse main worker config file. + extra_config_dir: A directory in which extra Synapse configuration files will be loaded. data_dir: The location of the synapse data directory. Where log and user-facing config files live. 
requested_workers: A list of requested workers @@ -1258,6 +1260,7 @@ def generate_worker_files( "/etc/supervisor/conf.d/synapse.conf", workers=worker_descriptors, main_config_path=config_path, + extra_config_dir=extra_config_dir, use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"), ) @@ -1318,6 +1321,9 @@ def main(args: list[str], environ: MutableMapping[str, str]) -> None: config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml") + # All files in this directory will be loaded as `homeserver.yaml` snippets + # Currently only intended for use by Complement + extra_config_dir = environ.get("_SYNAPSE_COMPLEMENT_EXTRA_CONFIG_DIR", None) data_dir = environ.get("SYNAPSE_DATA_DIR", "/data") # override SYNAPSE_NO_TLS, we don't support TLS in worker mode, @@ -1352,6 +1358,7 @@ def main(args: list[str], environ: MutableMapping[str, str]) -> None: generate_worker_files( environ=environ, config_path=config_path, + extra_config_dir=extra_config_dir, data_dir=data_dir, requested_workers=requested_workers, ) diff --git a/pyproject.toml b/pyproject.toml index adb9993aae..c2f0182687 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "matrix-synapse" -version = "1.150.0rc1" +version = "1.150.0" description = "Homeserver for the Matrix decentralised comms protocol" readme = "README.rst" authors = [ diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs index 595f9cf7eb..a53c1e771b 100644 --- a/rust/src/events/internal_metadata.rs +++ b/rust/src/events/internal_metadata.rs @@ -261,6 +261,11 @@ pub struct EventInternalMetadata { #[pyo3(get, set)] instance_name: Option, + /// The event ID of the redaction event, if this event has been redacted. + /// This is set dynamically at load time and is not persisted to the database. 
+ #[pyo3(get, set)] + redacted_by: Option, + /// whether this event is an outlier (ie, whether we have the state at that /// point in the DAG) #[pyo3(get, set)] @@ -289,6 +294,7 @@ impl EventInternalMetadata { data, stream_ordering: None, instance_name: None, + redacted_by: None, outlier: false, }) } diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 83b8de7b64..a6e01fce64 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -15,6 +15,7 @@ pub mod matrix_const; pub mod msc4388_rendezvous; pub mod push; pub mod rendezvous; +pub mod room_versions; pub mod segmenter; lazy_static! { @@ -58,6 +59,7 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { rendezvous::register_module(py, m)?; msc4388_rendezvous::register_module(py, m)?; segmenter::register_module(py, m)?; + room_versions::register_module(py, m)?; Ok(()) } diff --git a/rust/src/room_versions.rs b/rust/src/room_versions.rs new file mode 100644 index 0000000000..c0b304e18c --- /dev/null +++ b/rust/src/room_versions.rs @@ -0,0 +1,825 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2026 Element Creations Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * . + */ + +//! Rust implementation of room version definitions. + +use std::sync::{Arc, LazyLock, RwLock}; + +use pyo3::{ + exceptions::{PyKeyError, PyRuntimeError}, + prelude::*, + types::{PyFrozenSet, PyIterator, PyModule, PyModuleMethods}, + Bound, IntoPyObjectExt, PyResult, Python, +}; + +/// Internal enum for tracking the version of the event format, +/// independently of the room version. 
+/// +/// To reduce confusion, the event format versions are named after the room +/// versions that they were used or introduced in. +/// The concept of an 'event format version' is specific to Synapse (the +/// specification does not mention this term.) +#[pyclass(frozen)] +pub struct EventFormatVersions {} + +#[pymethods] +impl EventFormatVersions { + /// $id:server event id format: used for room v1 and v2 + #[classattr] + const ROOM_V1_V2: i32 = 1; + /// MSC1659-style $hash event id format: used for room v3 + #[classattr] + const ROOM_V3: i32 = 2; + /// MSC1884-style $hash format: introduced for room v4 + #[classattr] + const ROOM_V4_PLUS: i32 = 3; + /// MSC4291 room IDs as hashes: introduced for room HydraV11 + #[classattr] + const ROOM_V11_HYDRA_PLUS: i32 = 4; +} + +/// Enum to identify the state resolution algorithms. +#[pyclass(frozen)] +pub struct StateResolutionVersions {} + +#[pymethods] +impl StateResolutionVersions { + /// Room v1 state res + #[classattr] + const V1: i32 = 1; + /// MSC1442 state res: room v2 and later + #[classattr] + const V2: i32 = 2; + /// MSC4297 state res + #[classattr] + const V2_1: i32 = 3; +} + +/// Room disposition constants. +#[pyclass(frozen)] +pub struct RoomDisposition {} + +#[pymethods] +impl RoomDisposition { + #[classattr] + const STABLE: &'static str = "stable"; + #[classattr] + const UNSTABLE: &'static str = "unstable"; +} + +/// Enum for listing possible MSC3931 room version feature flags, for push rules. +#[pyclass(frozen)] +pub struct PushRuleRoomFlag {} + +#[pymethods] +impl PushRuleRoomFlag { + /// MSC3932: Room version supports MSC1767 Extensible Events. + #[classattr] + const EXTENSIBLE_EVENTS: &'static str = "org.matrix.msc3932.extensible_events"; +} + +/// An object which describes the unique attributes of a room version. +#[pyclass(frozen, eq, hash, get_all)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct RoomVersion { + /// The identifier for this version. 
+ pub identifier: &'static str, + /// One of the RoomDisposition constants. + pub disposition: &'static str, + /// One of the EventFormatVersions constants. + pub event_format: i32, + /// One of the StateResolutionVersions constants. + pub state_res: i32, + pub enforce_key_validity: bool, + /// Before MSC2432, m.room.aliases had special auth rules and redaction rules. + pub special_case_aliases_auth: bool, + /// Strictly enforce canonicaljson, do not allow: + /// * Integers outside the range of [-2^53 + 1, 2^53 - 1] + /// * Floats + /// * NaN, Infinity, -Infinity + pub strict_canonicaljson: bool, + /// MSC2209: Check 'notifications' key while verifying + /// m.room.power_levels auth rules. + pub limit_notifications_power_levels: bool, + /// MSC3820: No longer include the creator in m.room.create events (room version 11). + pub implicit_room_creator: bool, + /// MSC3820: Apply updated redaction rules algorithm from room version 11. + pub updated_redaction_rules: bool, + /// Support the 'restricted' join rule. + pub restricted_join_rule: bool, + /// Support for the proper redaction rules for the restricted join rule. + /// This requires restricted_join_rule to be enabled. + pub restricted_join_rule_fix: bool, + /// Support the 'knock' join rule. + pub knock_join_rule: bool, + /// MSC3389: Protect relation information from redaction. + pub msc3389_relation_redactions: bool, + /// Support the 'knock_restricted' join rule. + pub knock_restricted_join_rule: bool, + /// Enforce integer power levels. + pub enforce_int_power_levels: bool, + /// MSC3931: Adds a push rule condition for "room version feature flags", making + /// some push rules room version dependent. Note that adding a flag to this list + /// is not enough to mark it "supported": the push rule evaluator also needs to + /// support the flag. Unknown flags are ignored by the evaluator, making conditions + /// fail if used. Values from PushRuleRoomFlag. 
+ pub msc3931_push_features: &'static [&'static str], + /// MSC3757: Restricting who can overwrite a state event. + pub msc3757_enabled: bool, + /// MSC4289: Creator power enabled. + pub msc4289_creator_power_enabled: bool, + /// MSC4291: Room IDs as hashes of the create event. + pub msc4291_room_ids_as_hashes: bool, + /// Set of room versions where Synapse strictly enforces event key size limits + /// in bytes, rather than in codepoints. + /// + /// In these room versions, we are stricter with event size validation. + pub strict_event_byte_limits_room_versions: bool, +} + +const ROOM_VERSION_V1: RoomVersion = RoomVersion { + identifier: "1", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V1_V2, + state_res: StateResolutionVersions::V1, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V2: RoomVersion = RoomVersion { + identifier: "2", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V1_V2, + state_res: StateResolutionVersions::V2, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + 
msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V3: RoomVersion = RoomVersion { + identifier: "3", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V3, + state_res: StateResolutionVersions::V2, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V4: RoomVersion = RoomVersion { + identifier: "4", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V5: RoomVersion = RoomVersion { + identifier: "5", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + 
enforce_key_validity: true, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V6: RoomVersion = RoomVersion { + identifier: "6", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: false, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V7: RoomVersion = RoomVersion { + identifier: "7", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: false, + restricted_join_rule_fix: false, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, 
+ msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V8: RoomVersion = RoomVersion { + identifier: "8", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: false, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V9: RoomVersion = RoomVersion { + identifier: "9", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: false, + enforce_int_power_levels: false, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V10: RoomVersion = RoomVersion { + identifier: "10", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + 
enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +/// MSC3389 (Redaction changes for events with a relation) based on room version "10". +const ROOM_VERSION_MSC3389V10: RoomVersion = RoomVersion { + identifier: "org.matrix.msc3389.10", + disposition: RoomDisposition::UNSTABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: true, // Changed from v10 + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: true, +}; + +/// MSC1767 (Extensible Events) based on room version "10". 
+const ROOM_VERSION_MSC1767V10: RoomVersion = RoomVersion { + identifier: "org.matrix.msc1767.10", + disposition: RoomDisposition::UNSTABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[PushRuleRoomFlag::EXTENSIBLE_EVENTS], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +/// MSC3757 (Restricting who can overwrite a state event) based on room version "10". +const ROOM_VERSION_MSC3757V10: RoomVersion = RoomVersion { + identifier: "org.matrix.msc3757.10", + disposition: RoomDisposition::UNSTABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: false, + updated_redaction_rules: false, + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: true, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: false, +}; + +const ROOM_VERSION_V11: RoomVersion = RoomVersion { + identifier: "11", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: 
true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: true, // Used by MSC3820 + updated_redaction_rules: true, // Used by MSC3820 + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: true, // Changed from v10 +}; + +/// MSC3757 (Restricting who can overwrite a state event) based on room version "11". +const ROOM_VERSION_MSC3757V11: RoomVersion = RoomVersion { + identifier: "org.matrix.msc3757.11", + disposition: RoomDisposition::UNSTABLE, + event_format: EventFormatVersions::ROOM_V4_PLUS, + state_res: StateResolutionVersions::V2, + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: true, // Used by MSC3820 + updated_redaction_rules: true, // Used by MSC3820 + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: true, + msc4289_creator_power_enabled: false, + msc4291_room_ids_as_hashes: false, + strict_event_byte_limits_room_versions: true, +}; + +const ROOM_VERSION_HYDRA_V11: RoomVersion = RoomVersion { + identifier: "org.matrix.hydra.11", + disposition: RoomDisposition::UNSTABLE, + event_format: EventFormatVersions::ROOM_V11_HYDRA_PLUS, + state_res: StateResolutionVersions::V2_1, // Changed from v11 + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: true, // 
Used by MSC3820 + updated_redaction_rules: true, // Used by MSC3820 + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: true, // Changed from v11 + msc4291_room_ids_as_hashes: true, // Changed from v11 + strict_event_byte_limits_room_versions: true, +}; + +const ROOM_VERSION_V12: RoomVersion = RoomVersion { + identifier: "12", + disposition: RoomDisposition::STABLE, + event_format: EventFormatVersions::ROOM_V11_HYDRA_PLUS, + state_res: StateResolutionVersions::V2_1, // Changed from v11 + enforce_key_validity: true, + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + implicit_room_creator: true, // Used by MSC3820 + updated_redaction_rules: true, // Used by MSC3820 + restricted_join_rule: true, + restricted_join_rule_fix: true, + knock_join_rule: true, + msc3389_relation_redactions: false, + knock_restricted_join_rule: true, + enforce_int_power_levels: true, + msc3931_push_features: &[], + msc3757_enabled: false, + msc4289_creator_power_enabled: true, // Changed from v11 + msc4291_room_ids_as_hashes: true, // Changed from v11 + strict_event_byte_limits_room_versions: true, +}; + +/// Helper class for managing the known room versions, and providing dict-like +/// access to them for Python. +/// +/// Note: this is not necessarily all room versions, as we may not want to +/// support all experimental room versions. +/// +/// Note: room versions can be added to this mapping at startup (allowing +/// support for experimental room versions to be behind experimental feature +/// flags). +#[pyclass(frozen, mapping)] +#[derive(Clone)] +pub struct KnownRoomVersionsMapping { + // Note we use a Vec here to ensure that the order of keys is + // deterministic. 
We rely on this when generating parameterized tests in
+    // Python, as Python will generate the tests names including the room
+    // version and index (and we need test names to be stable to e.g. support
+    // running tests in parallel).
+    pub versions: Arc<RwLock<Vec<RoomVersion>>>,
+}
+
+#[pymethods]
+impl KnownRoomVersionsMapping {
+    /// Add a new room version to the mapping, indicating that this instance
+    /// supports it.
+    fn add_room_version(&self, version: RoomVersion) -> PyResult<()> {
+        let mut versions = self
+            .versions
+            .write()
+            .map_err(|_| PyRuntimeError::new_err("KnownRoomVersionsMapping lock poisoned"))?;
+
+        if versions.iter().any(|v| v.identifier == version.identifier) {
+            // We already have this room version, so we don't add it again (as
+            // otherwise we'd end up with duplicates).
+            return Ok(());
+        }
+
+        versions.push(version);
+        Ok(())
+    }
+
+    fn __getitem__(&self, key: &str) -> PyResult<RoomVersion> {
+        let versions = self.versions.read().unwrap();
+        versions
+            .iter()
+            .find(|v| v.identifier == key)
+            .copied()
+            .ok_or_else(|| PyKeyError::new_err(key.to_string()))
+    }
+
+    fn __contains__(&self, key: &str) -> PyResult<bool> {
+        let versions = self.versions.read().unwrap();
+        Ok(versions.iter().any(|v| v.identifier == key))
+    }
+
+    fn keys(&self) -> PyResult<Vec<&'static str>> {
+        // Note that technically we should return a view here (that also acts
+        // like a Set *and* has a stable ordering). We don't depend on this, so
+        // for simplicity we just return a list of the keys.
+        let versions = self.versions.read().unwrap();
+        Ok(versions.iter().map(|v| v.identifier).collect())
+    }
+
+    fn values(&self) -> PyResult<Vec<RoomVersion>> {
+        let versions = self.versions.read().unwrap();
+        Ok(versions.clone())
+    }
+
+    fn items(&self) -> PyResult<Vec<(&'static str, RoomVersion)>> {
+        // Note that technically we should return a view here (that also acts
+        // like a Set *and* has a stable ordering). We don't depend on this, so
+        // for simplicity we just return a list of the items.
+        let versions = self.versions.read().unwrap();
+        Ok(versions.iter().map(|v| (v.identifier, *v)).collect())
+    }
+
+    #[pyo3(signature = (key, default=None))]
+    fn get<'py>(
+        &self,
+        py: Python<'py>,
+        key: Bound<'py, PyAny>,
+        default: Option<Bound<'py, PyAny>>,
+    ) -> PyResult<Option<Bound<'py, PyAny>>> {
+        // We need to accept anything as the key, but we know that only strings
+        // are valid keys, so if it's not a string we just return the default.
+        let Ok(key) = key.extract::<&str>() else {
+            return Ok(default);
+        };
+
+        let versions = self.versions.read().unwrap();
+        if let Some(version) = versions.iter().find(|v| v.identifier == key).copied() {
+            return Ok(Some(version.into_bound_py_any(py)?));
+        }
+
+        Ok(default)
+    }
+
+    fn __len__(&self) -> PyResult<usize> {
+        let versions = self
+            .versions
+            .read()
+            .map_err(|_| PyRuntimeError::new_err("KnownRoomVersionsMapping lock poisoned"))?;
+        Ok(versions.len())
+    }
+
+    fn __iter__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyIterator>> {
+        let key_list = self.keys()?;
+
+        let bound_key_list = key_list.into_bound_py_any(py)?;
+        PyIterator::from_object(&bound_key_list)
+    }
+}
+
+impl<'py> IntoPyObject<'py> for &KnownRoomVersionsMapping {
+    type Target = KnownRoomVersionsMapping;
+
+    type Output = Bound<'py, Self::Target>;
+
+    type Error = PyErr;
+
+    fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
+        self.clone().into_pyobject(py)
+    }
+}
+
+/// All room versions this instance knows about, used to build the
+/// `KNOWN_ROOM_VERSIONS` dict.
+///
+/// Note: this is not necessarily all room versions, as we may not want to
+/// support all experimental room versions.
+static KNOWN_ROOM_VERSIONS: LazyLock<KnownRoomVersionsMapping> = LazyLock::new(|| {
+    let vec = vec![
+        ROOM_VERSION_V1,
+        ROOM_VERSION_V2,
+        ROOM_VERSION_V3,
+        ROOM_VERSION_V4,
+        ROOM_VERSION_V5,
+        ROOM_VERSION_V6,
+        ROOM_VERSION_V7,
+        ROOM_VERSION_V8,
+        ROOM_VERSION_V9,
+        ROOM_VERSION_V10,
+        ROOM_VERSION_V11,
+        ROOM_VERSION_V12,
+        ROOM_VERSION_MSC3757V10,
+        ROOM_VERSION_MSC3757V11,
+        ROOM_VERSION_HYDRA_V11,
+    ];
+
+    KnownRoomVersionsMapping {
+        versions: Arc::new(RwLock::new(vec)),
+    }
+});
+
+/// Container class for room version constants.
+///
+/// This should contain all room versions that we know about.
+#[pyclass(frozen)]
+pub struct RoomVersions {}
+
+#[pymethods]
+#[allow(non_snake_case)]
+impl RoomVersions {
+    #[classattr]
+    fn V1(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V1.into_py_any(py)
+    }
+    #[classattr]
+    fn V2(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V2.into_py_any(py)
+    }
+    #[classattr]
+    fn V3(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V3.into_py_any(py)
+    }
+    #[classattr]
+    fn V4(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V4.into_py_any(py)
+    }
+    #[classattr]
+    fn V5(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V5.into_py_any(py)
+    }
+    #[classattr]
+    fn V6(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V6.into_py_any(py)
+    }
+    #[classattr]
+    fn V7(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V7.into_py_any(py)
+    }
+    #[classattr]
+    fn V8(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V8.into_py_any(py)
+    }
+    #[classattr]
+    fn V9(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V9.into_py_any(py)
+    }
+    #[classattr]
+    fn V10(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V10.into_py_any(py)
+    }
+    #[classattr]
+    fn MSC1767v10(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_MSC1767V10.into_py_any(py)
+    }
+    #[classattr]
+    fn MSC3389v10(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_MSC3389V10.into_py_any(py)
+    }
+    #[classattr]
+    fn MSC3757v10(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_MSC3757V10.into_py_any(py)
+    }
+    #[classattr]
+    fn V11(py: Python<'_>) -> 
PyResult<Py<PyAny>> {
+        ROOM_VERSION_V11.into_py_any(py)
+    }
+    #[classattr]
+    fn MSC3757v11(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_MSC3757V11.into_py_any(py)
+    }
+    #[classattr]
+    fn HydraV11(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_HYDRA_V11.into_py_any(py)
+    }
+    #[classattr]
+    fn V12(py: Python<'_>) -> PyResult<Py<PyAny>> {
+        ROOM_VERSION_V12.into_py_any(py)
+    }
+}
+
+/// Called when registering modules with python.
+pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
+    let child_module = PyModule::new(py, "room_versions")?;
+    child_module.add_class::<RoomVersion>()?;
+    child_module.add_class::<RoomVersions>()?;
+    child_module.add_class::<KnownRoomVersionsMapping>()?;
+    child_module.add_class::<RoomDisposition>()?;
+    child_module.add_class::<EventFormatVersions>()?;
+    child_module.add_class::<StateResolutionVersions>()?;
+    child_module.add_class::<PushRuleRoomFlag>()?;
+
+    // Build KNOWN_EVENT_FORMAT_VERSIONS as a frozenset
+    let known_ef: [i32; 4] = [
+        EventFormatVersions::ROOM_V1_V2,
+        EventFormatVersions::ROOM_V3,
+        EventFormatVersions::ROOM_V4_PLUS,
+        EventFormatVersions::ROOM_V11_HYDRA_PLUS,
+    ];
+    let known_event_format_versions = PyFrozenSet::new(py, known_ef)?;
+    child_module.add("KNOWN_EVENT_FORMAT_VERSIONS", known_event_format_versions)?;
+    child_module.add("KNOWN_ROOM_VERSIONS", &*KNOWN_ROOM_VERSIONS)?;
+
+    m.add_submodule(&child_module)?;
+
+    // Register in sys.modules
+    py.import("sys")?
+        .getattr("modules")?
+ .set_item("synapse.synapse_rust.room_versions", child_module)?; + + Ok(()) +} diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 79b2a0c528..eedceb170e 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -122,7 +122,7 @@ BOOLEAN_COLUMNS = { "presence_stream": ["currently_active"], "public_room_list_stream": ["visibility"], "pushers": ["enabled"], - "redactions": ["have_censored"], + "redactions": ["have_censored", "recheck"], "remote_media_cache": ["authenticated"], "room_memberships": ["participant"], "room_stats_state": ["is_federatable"], diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 13c7758638..0035d7a58b 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -18,480 +18,22 @@ # # +from synapse.synapse_rust.room_versions import ( + KNOWN_EVENT_FORMAT_VERSIONS, + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + PushRuleRoomFlag, + RoomVersion, + RoomVersions, + StateResolutionVersions, +) -import attr - - -class EventFormatVersions: - """This is an internal enum for tracking the version of the event format, - independently of the room version. - - To reduce confusion, the event format versions are named after the room - versions that they were used or introduced in. - The concept of an 'event format version' is specific to Synapse (the - specification does not mention this term.) 
- """ - - ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2 - ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3 - ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4 - ROOM_V11_HYDRA_PLUS = 4 # MSC4291 room IDs as hashes: introduced for room HydraV11 - - -KNOWN_EVENT_FORMAT_VERSIONS = { - EventFormatVersions.ROOM_V1_V2, - EventFormatVersions.ROOM_V3, - EventFormatVersions.ROOM_V4_PLUS, - EventFormatVersions.ROOM_V11_HYDRA_PLUS, -} - - -class StateResolutionVersions: - """Enum to identify the state resolution algorithms""" - - V1 = 1 # room v1 state res - V2 = 2 # MSC1442 state res: room v2 and later - V2_1 = 3 # MSC4297 state res - - -class RoomDisposition: - STABLE = "stable" - UNSTABLE = "unstable" - - -class PushRuleRoomFlag: - """Enum for listing possible MSC3931 room version feature flags, for push rules""" - - # MSC3932: Room version supports MSC1767 Extensible Events. - EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events" - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class RoomVersion: - """An object which describes the unique attributes of a room version.""" - - identifier: str # the identifier for this version - disposition: str # one of the RoomDispositions - event_format: int # one of the EventFormatVersions - state_res: int # one of the StateResolutionVersions - enforce_key_validity: bool - - # Before MSC2432, m.room.aliases had special auth rules and redaction rules - special_case_aliases_auth: bool - # Strictly enforce canonicaljson, do not allow: - # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] - # * Floats - # * NaN, Infinity, -Infinity - strict_canonicaljson: bool - # MSC2209: Check 'notifications' key while verifying - # m.room.power_levels auth rules. 
- limit_notifications_power_levels: bool - # MSC3820: No longer include the creator in m.room.create events (room version 11) - implicit_room_creator: bool - # MSC3820: Apply updated redaction rules algorithm from room version 11 - updated_redaction_rules: bool - # Support the 'restricted' join rule. - restricted_join_rule: bool - # Support for the proper redaction rules for the restricted join rule. This requires - # restricted_join_rule to be enabled. - restricted_join_rule_fix: bool - # Support the 'knock' join rule. - knock_join_rule: bool - # MSC3389: Protect relation information from redaction. - msc3389_relation_redactions: bool - # Support the 'knock_restricted' join rule. - knock_restricted_join_rule: bool - # Enforce integer power levels - enforce_int_power_levels: bool - # MSC3931: Adds a push rule condition for "room version feature flags", making - # some push rules room version dependent. Note that adding a flag to this list - # is not enough to mark it "supported": the push rule evaluator also needs to - # support the flag. Unknown flags are ignored by the evaluator, making conditions - # fail if used. - msc3931_push_features: tuple[str, ...] 
# values from PushRuleRoomFlag - # MSC3757: Restricting who can overwrite a state event - msc3757_enabled: bool - # MSC4289: Creator power enabled - msc4289_creator_power_enabled: bool - # MSC4291: Room IDs as hashes of the create event - msc4291_room_ids_as_hashes: bool - - -class RoomVersions: - V1 = RoomVersion( - "1", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V1_V2, - StateResolutionVersions.V1, - enforce_key_validity=False, - special_case_aliases_auth=True, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V2 = RoomVersion( - "2", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V1_V2, - StateResolutionVersions.V2, - enforce_key_validity=False, - special_case_aliases_auth=True, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V3 = RoomVersion( - "3", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V3, - StateResolutionVersions.V2, - enforce_key_validity=False, - special_case_aliases_auth=True, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - 
msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V4 = RoomVersion( - "4", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=False, - special_case_aliases_auth=True, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V5 = RoomVersion( - "5", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=True, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V6 = RoomVersion( - "6", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=False, - msc3389_relation_redactions=False, - 
knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V7 = RoomVersion( - "7", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=False, - restricted_join_rule_fix=False, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V8 = RoomVersion( - "8", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=True, - restricted_join_rule_fix=False, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V9 = RoomVersion( - "9", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=False, - enforce_int_power_levels=False, - 
msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V10 = RoomVersion( - "10", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - MSC1767v10 = RoomVersion( - # MSC1767 (Extensible Events) based on room version "10" - "org.matrix.msc1767.10", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - MSC3757v10 = RoomVersion( - # MSC3757 (Restricting who can overwrite a state event) based on room version "10" - "org.matrix.msc3757.10", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=False, - updated_redaction_rules=False, - restricted_join_rule=True, - 
restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=True, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - V11 = RoomVersion( - "11", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=True, # Used by MSC3820 - updated_redaction_rules=True, # Used by MSC3820 - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - MSC3757v11 = RoomVersion( - # MSC3757 (Restricting who can overwrite a state event) based on room version "11" - "org.matrix.msc3757.11", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=True, # Used by MSC3820 - updated_redaction_rules=True, # Used by MSC3820 - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=True, - msc4289_creator_power_enabled=False, - msc4291_room_ids_as_hashes=False, - ) - HydraV11 = RoomVersion( - "org.matrix.hydra.11", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V11_HYDRA_PLUS, - StateResolutionVersions.V2_1, # Changed from v11 - enforce_key_validity=True, - special_case_aliases_auth=False, - 
strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=True, # Used by MSC3820 - updated_redaction_rules=True, # Used by MSC3820 - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=True, # Changed from v11 - msc4291_room_ids_as_hashes=True, # Changed from v11 - ) - V12 = RoomVersion( - "12", - RoomDisposition.STABLE, - EventFormatVersions.ROOM_V11_HYDRA_PLUS, - StateResolutionVersions.V2_1, # Changed from v11 - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - implicit_room_creator=True, # Used by MSC3820 - updated_redaction_rules=True, # Used by MSC3820 - restricted_join_rule=True, - restricted_join_rule_fix=True, - knock_join_rule=True, - msc3389_relation_redactions=False, - knock_restricted_join_rule=True, - enforce_int_power_levels=True, - msc3931_push_features=(), - msc3757_enabled=False, - msc4289_creator_power_enabled=True, # Changed from v11 - msc4291_room_ids_as_hashes=True, # Changed from v11 - ) - - -KNOWN_ROOM_VERSIONS: dict[str, RoomVersion] = { - v.identifier: v - for v in ( - RoomVersions.V1, - RoomVersions.V2, - RoomVersions.V3, - RoomVersions.V4, - RoomVersions.V5, - RoomVersions.V6, - RoomVersions.V7, - RoomVersions.V8, - RoomVersions.V9, - RoomVersions.V10, - RoomVersions.V11, - RoomVersions.V12, - RoomVersions.MSC3757v10, - RoomVersions.MSC3757v11, - RoomVersions.HydraV11, - ) -} +__all__ = [ + "EventFormatVersions", + "KNOWN_EVENT_FORMAT_VERSIONS", + "KNOWN_ROOM_VERSIONS", + "PushRuleRoomFlag", + "RoomVersion", + "RoomVersions", + "StateResolutionVersions", +] diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 7f4855f36b..55df4c382e 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py 
@@ -700,12 +700,10 @@ async def start(hs: "HomeServer", *, freeze: bool = True) -> None: # Load the OIDC provider metadatas, if OIDC is enabled. if hs.config.oidc.oidc_enabled: oidc = hs.get_oidc_handler() + # Preload the provider metadata. + # This will spawn fire-and-forget background processes. # Loading the provider metadata also ensures the provider config is valid. - # - # FIXME: It feels a bit strange to validate and block on startup as one of these - # OIDC providers could be temporarily unavailable and cause Synapse to be unable - # to start. - await oidc.load_metadata() + oidc.preload_metadata() # Load the certificate from disk. refresh_certificate(hs) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 2bbf77a352..d4e9d50b96 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -32,7 +32,7 @@ from typing import ( from prometheus_client import Counter from typing_extensions import ParamSpec, TypeGuard -from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind +from synapse.api.constants import ThirdPartyEntityKind from synapse.api.errors import CodeMessageException, HttpResponseException from synapse.appservice import ( ApplicationService, @@ -40,7 +40,7 @@ from synapse.appservice import ( TransactionUnusedFallbackKeys, ) from synapse.events import EventBase -from synapse.events.utils import SerializeEventConfig, serialize_event +from synapse.events.utils import SerializeEventConfig from synapse.http.client import SimpleHttpClient, is_unknown_endpoint from synapse.logging import opentracing from synapse.metrics import SERVER_NAME_LABEL @@ -128,6 +128,7 @@ class ApplicationServiceApi(SimpleHttpClient): self.server_name = hs.hostname self.clock = hs.get_clock() self.config = hs.config.appservice + self._event_serializer = hs.get_event_client_serializer() self.protocol_meta_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), @@ -343,7 +344,7 @@ class 
ApplicationServiceApi(SimpleHttpClient): # This is required by the configuration. assert service.hs_token is not None - serialized_events = self._serialize(service, events) + serialized_events = await self._serialize(service, events) if txn_id is None: logger.warning( @@ -539,30 +540,23 @@ class ApplicationServiceApi(SimpleHttpClient): return response - def _serialize( + async def _serialize( self, service: "ApplicationService", events: Iterable[EventBase] ) -> list[JsonDict]: time_now = self.clock.time_msec() - return [ - serialize_event( - e, - time_now, - config=SerializeEventConfig( - as_client_event=True, - # If this is an invite or a knock membership event, and we're interested - # in this user, then include any stripped state alongside the event. - include_stripped_room_state=( - e.type == EventTypes.Member - and ( - e.membership == Membership.INVITE - or e.membership == Membership.KNOCK - ) - and service.is_interested_in_user(e.state_key) - ), - # Appservices are considered 'trusted' by the admin and should have - # applicable metadata on their events. - include_admin_metadata=True, - ), - ) - for e in events - ] + return await self._event_serializer.serialize_events( + list(events), + time_now, + config=SerializeEventConfig( + as_client_event=True, + # If this is an invite or a knock membership event, then include + # any stripped state alongside the event. We could narrow this + # down to only users the appservice is "interested in", however + # it's not worth the complexity of doing so, and it's simpler to + # just include it for all users. + include_stripped_room_state=True, + # Appservices are considered 'trusted' by the admin and should have + # applicable metadata on their events. 
+ include_admin_metadata=True, + ), + ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index ff9b394eb9..e77412a797 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -477,8 +477,7 @@ class ExperimentalConfig(Config): self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False) if self.msc1767_enabled: # Enable room version (and thus applicable push rules from MSC3931/3932) - version_id = RoomVersions.MSC1767v10.identifier - KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10 + KNOWN_ROOM_VERSIONS.add_room_version(RoomVersions.MSC1767v10) # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 7098843742..f39f55b472 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -60,7 +60,6 @@ from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersion, - RoomVersions, ) from synapse.events import is_creator from synapse.state import CREATE_KEY @@ -383,25 +382,6 @@ def check_state_dependent_auth_rules( logger.debug("Allowing! %s", event) -# Set of room versions where Synapse did not apply event key size limits -# in bytes, but rather in codepoints. -# In these room versions, we are more lenient with event size validation. -LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = { - RoomVersions.V1, - RoomVersions.V2, - RoomVersions.V3, - RoomVersions.V4, - RoomVersions.V5, - RoomVersions.V6, - RoomVersions.V7, - RoomVersions.V8, - RoomVersions.V9, - RoomVersions.V10, - RoomVersions.MSC1767v10, - RoomVersions.MSC3757v10, -} - - def _check_size_limits(event: "EventBase") -> None: """ Checks the size limits in a PDU. 
@@ -440,9 +420,7 @@ def _check_size_limits(event: "EventBase") -> None: if len(event.event_id) > 255: raise EventSizeError("'event_id' too large", unpersistable=True) - strict_byte_limits = ( - event.room_version not in LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS - ) + strict_byte_limits = event.room_version.strict_event_byte_limits_room_versions # Byte size check: if these fail, then be lenient to avoid breaking rooms. if len(event.user_id.encode("utf-8")) > 255: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 89eb2182af..76ebac8b17 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -88,6 +88,7 @@ def prune_event(event: EventBase) -> EventBase: ) pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name pruned_event.internal_metadata.outlier = event.internal_metadata.outlier + pruned_event.internal_metadata.redacted_by = event.internal_metadata.redacted_by # Mark the event as redacted pruned_event.internal_metadata.redacted = True @@ -123,6 +124,7 @@ def clone_event(event: EventBase) -> EventBase: ) new_event.internal_metadata.instance_name = event.internal_metadata.instance_name new_event.internal_metadata.outlier = event.internal_metadata.outlier + new_event.internal_metadata.redacted_by = event.internal_metadata.redacted_by return new_event @@ -423,7 +425,7 @@ class SerializeEventConfig: # the transaction_id and delay_id in the unsigned section of the event. requester: Requester | None = None # List of event fields to include. If empty, all fields will be returned. - only_event_fields: list[str] | None = None + only_event_fields: list[str] | None = attr.ib(default=None) # Some events can have stripped room state stored in the `unsigned` field. # This is required for invite and knock functionality. If this option is # False, that state will be removed from the event before it is returned. @@ -434,6 +436,16 @@ class SerializeEventConfig: # whether an event was soft failed by the server. 
include_admin_metadata: bool = False + @only_event_fields.validator + def _validate_only_event_fields( + self, attribute: attr.Attribute, value: Any + ) -> None: + if value is None: + return + + if not isinstance(value, list) or not all(isinstance(f, str) for f in value): + raise TypeError("only_event_fields must be a list of strings") + _DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig() @@ -444,7 +456,7 @@ def make_config_for_admin(existing: SerializeEventConfig) -> SerializeEventConfi return attr.evolve(existing, include_admin_metadata=True) -def serialize_event( +def _serialize_event( e: JsonDict | EventBase, time_now_ms: int, *, @@ -476,13 +488,6 @@ def serialize_event( d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"] del d["unsigned"]["age_ts"] - if "redacted_because" in e.unsigned: - d["unsigned"]["redacted_because"] = serialize_event( - e.unsigned["redacted_because"], - time_now_ms, - config=config, - ) - # If we have applicable fields saved in the internal_metadata, include them in the # unsigned section of the event if the event was sent by the same session (or when # appropriate, just the same sender) as the one requesting the event. @@ -559,14 +564,6 @@ def serialize_event( if e.internal_metadata.policy_server_spammy: d["unsigned"]["io.element.synapse.policy_server_spammy"] = True - only_event_fields = config.only_event_fields - if only_event_fields: - if not isinstance(only_event_fields, list) or not all( - isinstance(f, str) for f in only_event_fields - ): - raise TypeError("only_event_fields must be a list of strings") - d = only_fields(d, only_event_fields) - return d @@ -591,6 +588,7 @@ class EventClientSerializer: *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, bundle_aggregations: dict[str, "BundledAggregations"] | None = None, + redaction_map: Mapping[str, "EventBase"] | None = None, ) -> JsonDict: """Serializes a single event. 
@@ -600,6 +598,8 @@ class EventClientSerializer: config: Event serialization config bundle_aggregations: A map from event_id to the aggregations to be bundled into the event. + redaction_map: Optional pre-fetched map from redaction event_id to event, + used to avoid per-event DB lookups when serializing many events. Returns: The serialized event @@ -617,7 +617,34 @@ class EventClientSerializer: ): config = make_config_for_admin(config) - serialized_event = serialize_event(event, time_now, config=config) + serialized_event = _serialize_event(event, time_now, config=config) + + # If the event was redacted, fetch the redaction event from the database + # and include it in the serialized event's unsigned section. + redacted_by: str | None = event.internal_metadata.redacted_by + if redacted_by is not None: + serialized_event.setdefault("unsigned", {})["redacted_by"] = redacted_by + if redaction_map is not None: + redaction_event: EventBase | None = redaction_map.get(redacted_by) + else: + redaction_event = await self._store.get_event( + redacted_by, + allow_none=True, + ) + if redaction_event is not None: + serialized_redaction = _serialize_event( + redaction_event, time_now, config=config + ) + serialized_event.setdefault("unsigned", {})["redacted_because"] = ( + serialized_redaction + ) + # format_event_for_client_v1 copies redacted_because to the + # top level, but since we add it after that runs, do it here. + if ( + config.as_client_event + and config.event_format is format_event_for_client_v1 + ): + serialized_event["redacted_because"] = serialized_redaction new_unsigned = {} for callback in self._add_extra_fields_to_unsigned_client_event_callbacks: @@ -630,6 +657,13 @@ class EventClientSerializer: new_unsigned.update(serialized_event["unsigned"]) serialized_event["unsigned"] = new_unsigned + # Only include fields that the client has requested. + # + # Note: we always return bundled aggregations, though it is unclear why. 
+ only_event_fields = config.only_event_fields + if only_event_fields: + serialized_event = only_fields(serialized_event, only_event_fields) + # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: if event.event_id in bundle_aggregations: @@ -745,12 +779,23 @@ class EventClientSerializer: str(len(events)), ) + # Batch-fetch all redaction events in one go rather than one per event. + redaction_ids = { + e.internal_metadata.redacted_by + for e in events + if isinstance(e, EventBase) and e.internal_metadata.redacted_by is not None + } + redaction_map = ( + await self._store.get_events(redaction_ids) if redaction_ids else {} + ) + return [ await self.serialize_event( event, time_now, config=config, bundle_aggregations=bundle_aggregations, + redaction_map=redaction_map, ) for event in events ] diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 429a739380..09b19056a7 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -49,18 +49,20 @@ from pymacaroons.exceptions import ( MacaroonInvalidSignatureException, ) +from twisted.internet import defer +from twisted.internet.defer import Deferred from twisted.web.client import readBody from twisted.web.http_headers import Headers from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig -from synapse.handlers.sso import MappingException, UserAttributes +from synapse.handlers.sso import MappingException, SsoSetupError, UserAttributes from synapse.http.server import finish_request from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.module_api import ModuleApi +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart from 
synapse.util.caches.cached_call import RetryOnExceptionCachedCall from synapse.util.clock import Clock @@ -69,6 +71,7 @@ from synapse.util.macaroons import MacaroonGenerator, OidcSessionData from synapse.util.templates import _localpart_from_email_filter if TYPE_CHECKING: + from synapse.module_api import ModuleApi from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -123,6 +126,8 @@ class OidcHandler: def __init__(self, hs: "HomeServer"): self._sso_handler = hs.get_sso_handler() + # Needed for wrap_as_background_process + self.hs = hs provider_confs = hs.config.oidc.oidc_providers # we should not have been instantiated if there is no configured provider. @@ -134,20 +139,47 @@ class OidcHandler: for p in provider_confs } - async def load_metadata(self) -> None: - """Validate the config and load the metadata from the remote endpoint. + @wrap_as_background_process("preload_oidc_metadata") + async def _preload_metadata_one_provider( + self, idp_id: str, p: "OidcProvider" + ) -> None: + """Attempt to preload the metadata from a single OIDC provider's remote endpoint + in the background. - Called at startup to ensure we have everything we need. + Will not raise exceptions, but will log at CRITICAL if an OIDC provider is broken. """ + + logger.info("Preloading OIDC provider %r", idp_id) + try: + await p.load_metadata() + if not p._uses_userinfo: + await p.load_jwks() + except Exception: + logger.critical( + # Include 'login' keyword for searchability. + "Error while preloading OIDC provider %r. Login may be broken!", + idp_id, + exc_info=True, + ) + + def preload_metadata(self) -> "Deferred[list[tuple[bool, None]]]": + """Attempt to preload the metadata from all the OIDC providers' remote endpoints + in the background. + + Will not raise exceptions, but will log at CRITICAL if an OIDC provider is broken. + + Can be **optionally** awaited in which case it will resolve when all + preloads are finished. 
+ """ + + to_wait = [] for idp_id, p in self._providers.items(): - try: - await p.load_metadata() - if not p._uses_userinfo: - await p.load_jwks() - except Exception as e: - raise Exception( - "Error while initialising OIDC provider %r" % (idp_id,) - ) from e + to_wait.append(self._preload_metadata_one_provider(idp_id, p)) + + return defer.DeferredList( + to_wait, + consumeErrors=True, + ) async def handle_oidc_callback(self, request: SynapseRequest) -> None: """Handle an incoming request to /_synapse/client/oidc/callback @@ -359,6 +391,18 @@ class OidcError(Exception): return self.error +class OidcDiscoveryError(SsoSetupError): + """ + Used to catch and mark errors when performing OIDC discovery. + """ + + +class OidcMetadataError(SsoSetupError): + """ + Used to catch and mark errors in the OIDC metadata configuration. + """ + + class OidcProvider: """Wraps the config for a single OIDC IdentityProvider @@ -372,6 +416,9 @@ class OidcProvider: macaroon_generator: MacaroonGenerator, provider: OidcProviderConfig, ): + # Needed here to break import loops + from synapse.module_api import ModuleApi + self._store = hs.get_datastores().main self._clock = hs.get_clock() @@ -644,9 +691,15 @@ class OidcProvider: # load any data from the discovery endpoint, if enabled if self._config.discover: - url = get_well_known_url(self._config.issuer, external=True) - metadata_response = await self._http_client.get_json(url) - metadata.update(metadata_response) + try: + url = get_well_known_url(self._config.issuer, external=True) + metadata_response = await self._http_client.get_json(url) + metadata.update(metadata_response) + except Exception as e: + # This `Exception` bound is a bit broad, but at least expecting + # `twisted.internet.error.ConnectionRefusedError` + # and likely many other network or JSON errors. 
+ raise OidcDiscoveryError() from e # override any discovered data with any settings in our config if self._config.authorization_endpoint: @@ -671,7 +724,12 @@ class OidcProvider: self._config.id_token_signing_alg_values_supported ) - self._validate_metadata(metadata) + try: + self._validate_metadata(metadata) + except ValueError as e: + # Wrap this error so that we can special-case it higher up + # Pass through `str(e)` as the message so tests can match it. + raise OidcMetadataError(str(e)) from e return metadata @@ -1685,7 +1743,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): This is the default mapping provider. """ - def __init__(self, config: JinjaOidcMappingConfig, module_api: ModuleApi): + def __init__(self, config: JinjaOidcMappingConfig, module_api: "ModuleApi"): self._config = config @staticmethod diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 0b2587f94f..bb5ca329e0 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -70,6 +70,15 @@ class MappingException(Exception): """ +class SsoSetupError(Exception): + """ + Used to catch and tag errors relating to an SSO setup's. + + Used as the superclass of specific error classes, + as note we can't e.g. import the OIDC module unless OIDC dependencies are installed. + """ + + class SsoIdentityProvider(Protocol): """Abstract base class to be implemented by SSO Identity Providers diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html index 6fa36c11c9..e7e34c7293 100644 --- a/synapse/res/templates/sso_error.html +++ b/synapse/res/templates/sso_error.html @@ -20,6 +20,27 @@

You are not allowed to log in here.

+{% elif error == "provider_unavailable" %} +
+

This login provider is unavailable right now.

+

+ There has been a problem which means that it's not currently possible + to log in with that provider. +

+

+ If your account has a password, or is connected to other login providers, + try those methods to log in. +

+

+ This issue could be temporary, so please try again later. + If the problem persists, please contact the server's administrator. +

+ + If you are the administrator of this server, please check the server logs for more information. + This could be caused by a server misconfiguration or an issue with the provider. + +
+ {% include "sso_footer.html" without context %} {% else %}

There was an error

diff --git a/synapse/rest/admin/events.py b/synapse/rest/admin/events.py index 1c39d5caf3..8da7a67820 100644 --- a/synapse/rest/admin/events.py +++ b/synapse/rest/admin/events.py @@ -5,7 +5,6 @@ from synapse.api.errors import NotFoundError from synapse.events.utils import ( SerializeEventConfig, format_event_raw, - serialize_event, ) from synapse.http.servlet import RestServlet from synapse.http.site import SynapseRequest @@ -40,6 +39,7 @@ class EventRestServlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main self._clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, event_id: str @@ -64,6 +64,10 @@ class EventRestServlet(RestServlet): include_stripped_room_state=True, include_admin_metadata=True, ) - res = {"event": serialize_event(event, self._clock.time_msec(), config=config)} + res = { + "event": await self._event_serializer.serialize_event( + event, self._clock.time_msec(), config=config + ) + } return HTTPStatus.OK, res diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index fe3cb9aa3d..a49b27d009 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # - import logging import re from typing import ( @@ -42,7 +41,7 @@ from synapse.api.errors import ( from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX from synapse.appservice import ApplicationService -from synapse.handlers.sso import SsoIdentityProvider +from synapse.handlers.sso import SsoIdentityProvider, SsoSetupError from synapse.http import get_request_uri from synapse.http.server import HttpServer, finish_request from synapse.http.servlet import ( @@ -679,11 +678,26 @@ class SsoRedirectServlet(RestServlet): args: dict[bytes, list[bytes]] = request.args # type: ignore client_redirect_url = parse_bytes_from_args(args, 
"redirectUrl", required=True) - sso_url = await self._sso_handler.handle_redirect_request( - request, - client_redirect_url, - idp_id, - ) + try: + sso_url = await self._sso_handler.handle_redirect_request( + request, + client_redirect_url, + idp_id, + ) + except SsoSetupError: + logger.exception( + "Login redirect failed because SSO/identity provider %r unavailable", + idp_id, + ) + # Show an error page that is slightly more friendly than JSON + self._sso_handler.render_error( + request, + "provider_unavailable", + "This login provider is currently unavailable on this homeserver.", + code=503, + ) + return + logger.info("Redirecting to %s", sso_url) request.redirect(sso_url) finish_request(request) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 9172bfcb4e..65d9c130ef 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -55,7 +55,6 @@ from synapse.events.utils import ( EventClientSerializer, SerializeEventConfig, format_event_for_client_v2, - serialize_event, ) from synapse.handlers.pagination import GetMessagesResult from synapse.http.server import HttpServer @@ -214,6 +213,7 @@ class RoomStateEventRestServlet(RestServlet): self.delayed_events_handler = hs.get_delayed_events_handler() self.auth = hs.get_auth() self.clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() self._max_event_delay_ms = hs.config.server.max_event_delay_ms self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._msc4354_enabled = hs.config.experimental.msc4354_enabled @@ -285,7 +285,7 @@ class RoomStateEventRestServlet(RestServlet): raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) if format == "event": - event = serialize_event( + event = await self._event_serializer.serialize_event( data, self.clock.time_msec(), config=SerializeEventConfig( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 
cb452dbc9b..941a5f9f3a 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2944,6 +2944,7 @@ class PersistEventsStore: values={ "redacts": event.redacts, "received_ts": self._clock.time_msec(), + "recheck": event.internal_metadata.need_to_check_redaction(), }, ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index f8300e016b..934cd157ca 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -159,6 +159,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS "redactions_received_ts", self._redactions_received_ts ) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.REDACTIONS_RECHECK_BG_UPDATE, + self._redactions_recheck_bg_update, + ) + # This index gets deleted in `event_fix_redactions_bytes` update self.db_pool.updates.register_background_index_update( "event_fix_redactions_bytes_create_index", @@ -747,6 +752,66 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return count + async def _redactions_recheck_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """Fills in the `recheck` column of the `redactions` table based on + the `recheck_redaction` field in each event's internal metadata.""" + last_event_id = progress.get("last_event_id", "") + + def _txn(txn: LoggingTransaction) -> int: + sql = """ + SELECT r.event_id, ej.internal_metadata + FROM redactions AS r + LEFT JOIN event_json AS ej USING (event_id) + WHERE r.event_id > ? + ORDER BY r.event_id ASC + LIMIT ? 
+ """ + txn.execute(sql, (last_event_id, batch_size)) + rows = txn.fetchall() + if not rows: + return 0 + + updates = [] + for event_id, internal_metadata_json in rows: + if internal_metadata_json is not None: + internal_metadata = db_to_json(internal_metadata_json) + recheck = bool(internal_metadata.get("recheck_redaction", False)) + else: + recheck = False + if not recheck: + # Column defaults to true, so we only need to update rows + # where recheck should be false. + updates.append((event_id, recheck)) + + self.db_pool.simple_update_many_txn( + txn, + table="redactions", + key_names=("event_id",), + key_values=[(event_id,) for event_id, _ in updates], + value_names=("recheck",), + value_values=[(recheck,) for _, recheck in updates], + ) + + upper_event_id = rows[-1][0] + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.REDACTIONS_RECHECK_BG_UPDATE, + {"last_event_id": upper_event_id}, + ) + + return len(rows) + + count = await self.db_pool.runInteraction("_redactions_recheck_bg_update", _txn) + + if not count: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.REDACTIONS_RECHECK_BG_UPDATE + ) + + return count + async def _event_fix_redactions_bytes( self, progress: JsonDict, batch_size: int ) -> int: diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index cb0764feb8..cc79b8042b 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -179,7 +179,11 @@ class _EventRow: rejected_reason: if the event was rejected, the reason why. - redactions: a list of event-ids which (claim to) redact this event. + unconfirmed_redactions: a list of event-ids which (claim to) redact this event + and need to be rechecked. + + confirmed_redactions: a list of event-ids which redact this event and have been + confirmed as valid redactions. outlier: True if this event is an outlier. 
""" @@ -192,7 +196,8 @@ class _EventRow: format_version: int | None room_version_id: str | None rejected_reason: str | None - redactions: list[str] + unconfirmed_redactions: list[str] + confirmed_redactions: list[str] outlier: bool @@ -1359,14 +1364,20 @@ class EventsWorkerStore(SQLBaseStore): ) row_map = await self._enqueue_events(event_ids_to_fetch) - # we need to recursively fetch any redactions of those events + # we need to recursively fetch redaction events that require + # rechecking, so we can validate them redaction_ids: set[str] = set() for event_id in event_ids_to_fetch: row = row_map.get(event_id) fetched_event_ids.add(event_id) if row: fetched_events[event_id] = row - redaction_ids.update(row.redactions) + + # If this event only has unconfirmed redactions we fetch + # them from the DB so that we check them to see if any are + # valid. + if not row.confirmed_redactions: + redaction_ids.update(row.unconfirmed_redactions) event_ids_to_fetch = redaction_ids.difference(fetched_event_ids) return event_ids_to_fetch @@ -1510,9 +1521,12 @@ class EventsWorkerStore(SQLBaseStore): # the cache entries. 
result_map: dict[str, EventCacheEntry] = {} for event_id, original_ev in event_map.items(): - redactions = fetched_events[event_id].redactions + row = fetched_events[event_id] redacted_event = self._maybe_redact_event_row( - original_ev, redactions, event_map + original_ev, + row.unconfirmed_redactions, + row.confirmed_redactions, + event_map, ) cache_entry = EventCacheEntry( @@ -1606,21 +1620,25 @@ class EventsWorkerStore(SQLBaseStore): format_version=row[5], room_version_id=row[6], rejected_reason=row[7], - redactions=[], + unconfirmed_redactions=[], + confirmed_redactions=[], outlier=bool(row[8]), # This is an int in SQLite3 ) # check for redactions - redactions_sql = "SELECT event_id, redacts FROM redactions WHERE " + redactions_sql = "SELECT event_id, redacts, recheck FROM redactions WHERE " clause, args = make_in_list_sql_clause(txn.database_engine, "redacts", evs) txn.execute(redactions_sql + clause, args) - for redacter, redacted in txn: + for redacter, redacted, recheck in txn: d = event_dict.get(redacted) if d: - d.redactions.append(redacter) + if recheck: + d.unconfirmed_redactions.append(redacter) + else: + d.confirmed_redactions.append(redacter) # check for MSC4293 redactions to_check = [] @@ -1669,24 +1687,28 @@ class EventsWorkerStore(SQLBaseStore): # backfilled events, as they have a negative stream ordering if e_row.stream_ordering >= redact_end_ordering: continue - e_row.redactions.append(redacting_event_id) + e_row.unconfirmed_redactions.append(redacting_event_id) return event_dict def _maybe_redact_event_row( self, original_ev: EventBase, - redactions: Iterable[str], + unconfirmed_redactions: Iterable[str], + confirmed_redactions: Iterable[str], event_map: dict[str, EventBase], ) -> EventBase | None: - """Given an event object and a list of possible redacting event ids, + """Given an event object and lists of possible redacting event ids, determine whether to honour any of those redactions and if so return a redacted event. 
Args: original_ev: The original event. - redactions: list of event ids of potential redaction events + unconfirmed_redactions: list of event ids of redaction events that need + domain rechecking (room v3+). + confirmed_redactions: list of event ids of redaction events that have + already been validated and do not need rechecking. event_map: other events which have been fetched, in which we can - look up the redaaction events. Map from event id to event. + look up the redaction events. Map from event id to event. Returns: If the event should be redacted, a pruned event object. Otherwise, None. @@ -1695,7 +1717,12 @@ class EventsWorkerStore(SQLBaseStore): # we choose to ignore redactions of m.room.create events. return None - for redaction_id in redactions: + for redaction_id in confirmed_redactions: + redacted_event = prune_event(original_ev) + redacted_event.internal_metadata.redacted_by = redaction_id + return redacted_event + + for redaction_id in unconfirmed_redactions: redaction_event = event_map.get(redaction_id) if not redaction_event or redaction_event.rejected_reason: # we don't have the redaction event, or the redaction event was not @@ -1736,12 +1763,10 @@ class EventsWorkerStore(SQLBaseStore): # we found a good redaction event. Redact! redacted_event = prune_event(original_ev) - redacted_event.unsigned["redacted_by"] = redaction_id - - # It's fine to add the event directly, since get_pdu_json - # will serialise this field correctly - redacted_event.unsigned["redacted_because"] = redaction_event + redacted_event.internal_metadata.redacted_by = redaction_id + # Note: The `redacted_because` field will later be populated by + # `EventClientSerializer.serialize_event`. 
return redacted_event # no valid redaction found for this event diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index c4c4d7bcc4..e3095a9d0d 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 93 # remember to update the list below when updating +SCHEMA_VERSION = 94 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -171,6 +171,9 @@ Changes in SCHEMA_VERSION = 92 Changes in SCHEMA_VERSION = 93 - MSC4140: Set delayed events to be uniquely identifiable by their delay ID. + +Changes in SCHEMA_VERSION = 94 + - Add `recheck` column (boolean, default true) to the `redactions` table. """ diff --git a/synapse/storage/schema/main/delta/94/01_redactions_recheck.sql b/synapse/storage/schema/main/delta/94/01_redactions_recheck.sql new file mode 100644 index 0000000000..e99aa52f1f --- /dev/null +++ b/synapse/storage/schema/main/delta/94/01_redactions_recheck.sql @@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2026 Element Creations, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . 
+ + +ALTER TABLE redactions ADD COLUMN recheck boolean NOT NULL DEFAULT true; diff --git a/synapse/storage/schema/main/delta/94/02_redactions_recheck_bg_update.sql b/synapse/storage/schema/main/delta/94/02_redactions_recheck_bg_update.sql new file mode 100644 index 0000000000..6367afd318 --- /dev/null +++ b/synapse/storage/schema/main/delta/94/02_redactions_recheck_bg_update.sql @@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2026 Element Creations, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (9402, 'redactions_recheck', '{}'); diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi index 185f29694b..e6753bbbad 100644 --- a/synapse/synapse_rust/events.pyi +++ b/synapse/synapse_rust/events.pyi @@ -21,6 +21,8 @@ class EventInternalMetadata: """the stream ordering of this event. None, until it has been persisted.""" instance_name: str | None """the instance name of the server that persisted this event. None, until it has been persisted.""" + redacted_by: str | None + """the event ID of the redaction event, if this event has been redacted. 
Set dynamically at load time, not persisted.""" outlier: bool """whether this event is an outlier (ie, whether we have the state at that diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi index 9d8f0389e8..ef0d5f94f4 100644 --- a/synapse/synapse_rust/push.pyi +++ b/synapse/synapse_rust/push.pyi @@ -65,7 +65,7 @@ class PushRuleEvaluator: notification_power_levels: Mapping[str, int], related_events_flattened: Mapping[str, Mapping[str, JsonValue]], related_event_match_enabled: bool, - room_version_feature_flags: tuple[str, ...], + room_version_feature_flags: list[str], msc3931_enabled: bool, msc4210_enabled: bool, msc4306_enabled: bool, diff --git a/synapse/synapse_rust/room_versions.pyi b/synapse/synapse_rust/room_versions.pyi new file mode 100644 index 0000000000..909e3a1c26 --- /dev/null +++ b/synapse/synapse_rust/room_versions.pyi @@ -0,0 +1,142 @@ +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2026 Element Creations Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . + +from collections.abc import Mapping +from typing import Iterator + +class EventFormatVersions: + """Internal enum for tracking the version of the event format, + independently of the room version. + + To reduce confusion, the event format versions are named after the room + versions that they were used or introduced in. + The concept of an 'event format version' is specific to Synapse (the + specification does not mention this term.) 
+ """ + + ROOM_V1_V2: int + """:server event id format: used for room v1 and v2""" + ROOM_V3: int + """MSC1659-style event id format: used for room v3""" + ROOM_V4_PLUS: int + """MSC1884-style format: introduced for room v4""" + ROOM_V11_HYDRA_PLUS: int + """MSC4291 room IDs as hashes: introduced for room HydraV11""" + +KNOWN_EVENT_FORMAT_VERSIONS: frozenset[int] + +class StateResolutionVersions: + """Enum to identify the state resolution algorithms.""" + + V1: int + """Room v1 state res""" + V2: int + """MSC1442 state res: room v2 and later""" + V2_1: int + """MSC4297 state res""" + +class RoomDisposition: + """Room disposition constants.""" + + STABLE: str + UNSTABLE: str + +class PushRuleRoomFlag: + """Enum for listing possible MSC3931 room version feature flags, for push rules.""" + + EXTENSIBLE_EVENTS: str + """MSC3932: Room version supports MSC1767 Extensible Events.""" + +class RoomVersion: + """An object which describes the unique attributes of a room version.""" + + identifier: str + """The identifier for this version.""" + disposition: str + """One of the RoomDisposition constants.""" + event_format: int + """One of the EventFormatVersions constants.""" + state_res: int + """One of the StateResolutionVersions constants.""" + enforce_key_validity: bool + special_case_aliases_auth: bool + """Before MSC2432, m.room.aliases had special auth rules and redaction rules.""" + strict_canonicaljson: bool + """Strictly enforce canonicaljson, do not allow: + * Integers outside the range of [-2^53 + 1, 2^53 - 1] + * Floats + * NaN, Infinity, -Infinity + """ + limit_notifications_power_levels: bool + """MSC2209: Check 'notifications' key while verifying + m.room.power_levels auth rules.""" + implicit_room_creator: bool + """MSC3820: No longer include the creator in m.room.create events (room version 11).""" + updated_redaction_rules: bool + """MSC3820: Apply updated redaction rules algorithm from room version 11.""" + restricted_join_rule: bool + """Support the 
'restricted' join rule.""" + restricted_join_rule_fix: bool + """Support for the proper redaction rules for the restricted join rule. + This requires restricted_join_rule to be enabled.""" + knock_join_rule: bool + """Support the 'knock' join rule.""" + msc3389_relation_redactions: bool + """MSC3389: Protect relation information from redaction.""" + knock_restricted_join_rule: bool + """Support the 'knock_restricted' join rule.""" + enforce_int_power_levels: bool + """Enforce integer power levels.""" + msc3931_push_features: list[str] + """MSC3931: Adds a push rule condition for "room version feature flags", making + some push rules room version dependent. Note that adding a flag to this list + is not enough to mark it "supported": the push rule evaluator also needs to + support the flag. Unknown flags are ignored by the evaluator, making conditions + fail if used. Values from PushRuleRoomFlag.""" + msc3757_enabled: bool + """MSC3757: Restricting who can overwrite a state event.""" + msc4289_creator_power_enabled: bool + """MSC4289: Creator power enabled.""" + msc4291_room_ids_as_hashes: bool + """MSC4291: Room IDs as hashes of the create event.""" + strict_event_byte_limits_room_versions: bool + """Whether this room version strictly enforces event key size limits in bytes, + rather than in codepoints. + + If true, this room version uses stricter event size validation.""" + +class RoomVersions: + V1: RoomVersion + V2: RoomVersion + V3: RoomVersion + V4: RoomVersion + V5: RoomVersion + V6: RoomVersion + V7: RoomVersion + V8: RoomVersion + V9: RoomVersion + V10: RoomVersion + MSC1767v10: RoomVersion + MSC3389v10: RoomVersion + MSC3757v10: RoomVersion + V11: RoomVersion + MSC3757v11: RoomVersion + HydraV11: RoomVersion + V12: RoomVersion + +class KnownRoomVersionsMapping(Mapping[str, RoomVersion]): + def add_room_version(self, room_version: RoomVersion) -> None: ... + def __getitem__(self, key: str) -> RoomVersion: ... + def __len__(self) -> int: ... 
+ def __iter__(self) -> Iterator[str]: ... + +KNOWN_ROOM_VERSIONS: KnownRoomVersionsMapping diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py index b01653246a..992c36caba 100644 --- a/synapse/types/storage/__init__.py +++ b/synapse/types/storage/__init__.py @@ -64,3 +64,5 @@ class _BackgroundUpdates: ) FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap" + + REDACTIONS_RECHECK_BG_UPDATE = "redactions_recheck" diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 9ea015e138..af44b5dec1 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -20,9 +20,8 @@ # import unittest as stdlib_unittest -from typing import Any, Mapping +from typing import TYPE_CHECKING, Any, Mapping -import attr from parameterized import parameterized from synapse.api.constants import EventContentFields @@ -38,11 +37,15 @@ from synapse.events.utils import ( make_config_for_admin, maybe_upsert_event_field, prune_event, - serialize_event, ) from synapse.types import JsonDict, create_requester from synapse.util.frozenutils import freeze +from tests.unittest import HomeserverTestCase + +if TYPE_CHECKING: + from synapse.server import HomeServer + def MockEvent(**kwargs: Any) -> EventBase: if "event_id" not in kwargs: @@ -553,11 +556,6 @@ class PruneEventTestCase(stdlib_unittest.TestCase): room_version=RoomVersions.V10, ) - # Create a new room version. - msc3389_room_ver = attr.evolve( - RoomVersions.V10, msc3389_relation_redactions=True - ) - self.run_test( { "type": "m.room.message", @@ -581,7 +579,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=msc3389_room_ver, + room_version=RoomVersions.MSC3389v10, ) # If the field is not an object, redact it. 
@@ -599,7 +597,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=msc3389_room_ver, + room_version=RoomVersions.MSC3389v10, ) # If the m.relates_to property would be empty, redact it. @@ -614,7 +612,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=msc3389_room_ver, + room_version=RoomVersions.MSC3389v10, ) @@ -644,19 +642,27 @@ class CloneEventTestCase(stdlib_unittest.TestCase): self.assertEqual(cloned.internal_metadata.txn_id, "txn") -class SerializeEventTestCase(stdlib_unittest.TestCase): +class SerializeEventTestCase(HomeserverTestCase): + def prepare(self, reactor: Any, clock: Any, hs: "HomeServer") -> None: + self._event_serializer = hs.get_event_client_serializer() + def serialize( self, ev: EventBase, fields: list[str] | None, include_admin_metadata: bool = False, + redaction_map: Mapping[str, EventBase] | None = None, ) -> JsonDict: - return serialize_event( - ev, - 1479807801915, - config=SerializeEventConfig( - only_event_fields=fields, include_admin_metadata=include_admin_metadata - ), + return self.get_success( + self._event_serializer.serialize_event( + ev, + 1479807801915, + config=SerializeEventConfig( + only_event_fields=fields, + include_admin_metadata=include_admin_metadata, + ), + redaction_map=redaction_map, + ) ) def test_event_fields_works_with_keys(self) -> None: @@ -770,9 +776,8 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): def test_event_fields_fail_if_fields_not_str(self) -> None: with self.assertRaises(TypeError): - self.serialize( - MockEvent(room_id="!foo:bar", content={"foo": "bar"}), - ["room_id", 4], # type: ignore[list-item] + SerializeEventConfig( + only_event_fields=["room_id", 4], # type: ignore[list-item] ) def test_default_serialize_config_excludes_admin_metadata(self) -> None: @@ -873,6 +878,52 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): ) 
self.assertTrue(admin_config.include_admin_metadata) + def test_redacted_because_is_filtered_out(self) -> None: + """If an event's unsigned dict has a `redacted_by` field, then the + `redacted_because` should be filtered out if not specified in + `only_event_fields`.""" + + redaction_id = "$redaction_event_id" + + event = MockEvent( + type="foo", + event_id="test", + room_id="!foo:bar", + content={"foo": "bar"}, + ) + event.internal_metadata.redacted_by = redaction_id + + redaction_event = MockEvent( + type="m.room.redaction", + event_id=redaction_id, + content={"redacts": "test"}, + ) + + self.assertEqual( + self.serialize( + event, + ["content.foo"], + redaction_map={redaction_id: redaction_event}, + ), + { + "content": {"foo": "bar"}, + }, + ) + + self.assertEqual( + self.serialize( + event, + ["content.foo", "unsigned.redacted_because"], + redaction_map={redaction_id: redaction_event}, + ), + { + "content": {"foo": "bar"}, + "unsigned": { + "redacted_because": self.serialize(redaction_event, fields=None), + }, + }, + ) + class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase): def setUp(self) -> None: diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 4583afb625..62b84c77a4 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -19,7 +19,7 @@ # # import os -from typing import Any, Awaitable, ContextManager +from typing import Any, ContextManager from unittest.mock import ANY, AsyncMock, Mock, patch from urllib.parse import parse_qs, urlparse @@ -29,6 +29,7 @@ from twisted.internet.testing import MemoryReactor from synapse.handlers.sso import MappingException from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable from synapse.server import HomeServer from synapse.types import JsonDict, UserID from synapse.util.clock import Clock @@ -44,7 +45,7 @@ try: from authlib.oidc.core import UserInfo from authlib.oidc.discovery import OpenIDProviderMetadata - from 
synapse.handlers.oidc import Token, UserAttributeDict + from synapse.handlers.oidc import OidcMetadataError, Token, UserAttributeDict HAS_OIDC = True except ImportError: @@ -264,6 +265,29 @@ class OidcHandlerTestCase(HomeserverTestCase): self.get_success(self.provider.load_metadata()) self.fake_server.get_metadata_handler.assert_not_called() + @override_config({"oidc_config": {**DEFAULT_CONFIG, "discover": True}}) + def test_startup_metadata_preload_failure_is_logged(self) -> None: + """ + The metadata preload on startup causes logs to be emitted, but no + exceptions. + """ + with self.fake_server.buggy_endpoint(metadata=True): + with self.assertLogs("synapse.handlers.oidc", level="CRITICAL") as logs: + self.get_success( + make_deferred_yieldable(self.handler.preload_metadata()) + ) + + self.assertIn( + "CRITICAL:synapse.handlers.oidc:Error while preloading OIDC provider 'oidc'. Login may be broken!", + [line for record in logs.output for line in record.split("\n")], + ) + + # Even though the preload failed, if we try to load the metadata later, + # the attempt still goes through. 
+ self.fake_server.get_metadata_handler.assert_not_called() + self.get_success(self.provider.load_metadata()) + self.fake_server.get_metadata_handler.assert_called_once() + @override_config({"oidc_config": {**EXPLICIT_ENDPOINT_CONFIG, "discover": True}}) def test_discovery_with_explicit_config(self) -> None: """ @@ -351,49 +375,53 @@ class OidcHandlerTestCase(HomeserverTestCase): """Provider metadatas are extensively validated.""" h = self.provider - def force_load_metadata() -> Awaitable[None]: + def force_load_metadata() -> None: async def force_load() -> "OpenIDProviderMetadata": return await h.load_metadata(force=True) - return get_awaitable_result(force_load()) + get_awaitable_result(force_load()) # Default test config does not throw force_load_metadata() with self.metadata_edit({"issuer": None}): - self.assertRaisesRegex(ValueError, "issuer", force_load_metadata) + self.assertRaisesRegex(OidcMetadataError, "issuer", force_load_metadata) with self.metadata_edit({"issuer": "http://insecure/"}): - self.assertRaisesRegex(ValueError, "issuer", force_load_metadata) + self.assertRaisesRegex(OidcMetadataError, "issuer", force_load_metadata) with self.metadata_edit({"issuer": "https://invalid/?because=query"}): - self.assertRaisesRegex(ValueError, "issuer", force_load_metadata) + self.assertRaisesRegex(OidcMetadataError, "issuer", force_load_metadata) with self.metadata_edit({"authorization_endpoint": None}): self.assertRaisesRegex( - ValueError, "authorization_endpoint", force_load_metadata + OidcMetadataError, "authorization_endpoint", force_load_metadata ) with self.metadata_edit({"authorization_endpoint": "http://insecure/auth"}): self.assertRaisesRegex( - ValueError, "authorization_endpoint", force_load_metadata + OidcMetadataError, "authorization_endpoint", force_load_metadata ) with self.metadata_edit({"token_endpoint": None}): - self.assertRaisesRegex(ValueError, "token_endpoint", force_load_metadata) + self.assertRaisesRegex( + OidcMetadataError, 
"token_endpoint", force_load_metadata + ) with self.metadata_edit({"token_endpoint": "http://insecure/token"}): - self.assertRaisesRegex(ValueError, "token_endpoint", force_load_metadata) + self.assertRaisesRegex( + OidcMetadataError, "token_endpoint", force_load_metadata + ) with self.metadata_edit({"jwks_uri": None}): - self.assertRaisesRegex(ValueError, "jwks_uri", force_load_metadata) + self.assertRaisesRegex(OidcMetadataError, "jwks_uri", force_load_metadata) with self.metadata_edit({"jwks_uri": "http://insecure/jwks.json"}): - self.assertRaisesRegex(ValueError, "jwks_uri", force_load_metadata) + self.assertRaisesRegex(OidcMetadataError, "jwks_uri", force_load_metadata) with self.metadata_edit({"response_types_supported": ["id_token"]}): self.assertRaisesRegex( - ValueError, "response_types_supported", force_load_metadata + OidcMetadataError, "response_types_supported", force_load_metadata ) with self.metadata_edit( @@ -406,7 +434,7 @@ class OidcHandlerTestCase(HomeserverTestCase): {"token_endpoint_auth_methods_supported": ["client_secret_post"]} ): self.assertRaisesRegex( - ValueError, + OidcMetadataError, "token_endpoint_auth_methods_supported", force_load_metadata, ) @@ -423,7 +451,9 @@ class OidcHandlerTestCase(HomeserverTestCase): h._scopes = [] self.assertTrue(h._uses_userinfo) with self.metadata_edit({"userinfo_endpoint": None}): - self.assertRaisesRegex(ValueError, "userinfo_endpoint", force_load_metadata) + self.assertRaisesRegex( + OidcMetadataError, "userinfo_endpoint", force_load_metadata + ) with self.metadata_edit({"jwks_uri": None}): # Shouldn't raise with a valid userinfo, even without jwks diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index 28bfb8b8ea..b7b94482ef 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -101,11 +101,10 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): msg_dict = msg.get_dict() msg_dict["content"] = {} - 
msg_dict["unsigned"]["redacted_by"] = redaction.event_id - msg_dict["unsigned"]["redacted_because"] = redaction redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) + redacted.internal_metadata.redacted_by = redaction.event_id self.check( "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual ) @@ -125,11 +124,10 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): msg_dict = msg.get_dict() msg_dict["content"] = {} - msg_dict["unsigned"]["redacted_by"] = redaction.event_id - msg_dict["unsigned"]["redacted_because"] = redaction redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) + redacted.internal_metadata.redacted_by = redaction.event_id self.check( "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual ) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index f85c9939ce..221121007d 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -4746,10 +4746,10 @@ class MSC4293RedactOnBanKickTestCase(unittest.FederatingHomeserverTestCase): original = self.get_success(self.store.get_event(message.event_id)) if not original: self.fail("Expected to find remote message in DB") - redacted_because = original.unsigned.get("redacted_because") - if not redacted_because: - self.fail("Did not find redacted_because field") - self.assertEqual(redacted_because.event_id, ban_event_id) + redacted_by = original.internal_metadata.redacted_by + if not redacted_by: + self.fail("Did not find redacted_by field") + self.assertEqual(redacted_by, ban_event_id) def test_unbanning_remote_user_stops_redaction_action(self) -> None: bad_user = "@remote_bad_user:" + self.OTHER_SERVER_NAME @@ -5111,7 +5111,7 @@ class MSC4293RedactOnBanKickTestCase(unittest.FederatingHomeserverTestCase): original = self.get_success(self.store.get_event(message.event_id)) if not original: self.fail("Expected to find remote 
message in DB") - self.assertEqual(original.unsigned["redacted_by"], ban_event_id) + self.assertEqual(original.internal_metadata.redacted_by, ban_event_id) def test_rejoining_kicked_remote_user_stops_redaction_action(self) -> None: bad_user = "@remote_bad_user:" + self.OTHER_SERVER_NAME diff --git a/tests/server.py b/tests/server.py index d17b2478e3..55860701da 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1358,6 +1358,16 @@ def start_test_homeserver( hs.get_media_sender_thread_pool = thread_pool # type: ignore[method-assign] + # Load the OIDC provider metadatas, if OIDC is enabled. + # This matches `start` in synapse/app/_base.py + # + # TODO: Extract common startup logic somewhere cleaner + if hs.config.oidc.oidc_enabled: + oidc = hs.get_oidc_handler() + # Preload the provider metadata. + # This will spawn fire-and-forget background processes. + oidc.preload_metadata() + # Load any configured modules into the homeserver module_api = hs.get_module_api() for module, module_config in hs.config.modules.loaded_modules: diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py index d1a794c5a1..a5b53de77f 100644 --- a/tests/storage/test_events_bg_updates.py +++ b/tests/storage/test_events_bg_updates.py @@ -14,11 +14,14 @@ # +from canonicaljson import encode_canonical_json + from twisted.internet.testing import MemoryReactor from synapse.api.constants import MAX_DEPTH from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.server import HomeServer +from synapse.types.storage import _BackgroundUpdates from synapse.util.clock import Clock from tests.unittest import HomeserverTestCase @@ -154,3 +157,133 @@ class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase): # Assert that the topological_ordering of events has not been changed # from their depth. 
self.assertDictEqual(event_id_to_depth, dict(rows)) + + +class TestRedactionsRecheckBgUpdate(HomeserverTestCase): + """Test the background update that backfills the `recheck` column in redactions.""" + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self.store = self.hs.get_datastores().main + self.db_pool = self.store.db_pool + + # Re-insert the background update, since it already ran during setup. + self.get_success( + self.db_pool.simple_insert( + table="background_updates", + values={ + "update_name": _BackgroundUpdates.REDACTIONS_RECHECK_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.db_pool.updates._all_done = False + + def _insert_redaction( + self, + event_id: str, + redacts: str, + recheck_redaction: bool | None = None, + insert_event_json: bool = True, + ) -> None: + """Insert a row into `redactions` and optionally a matching `event_json` row. + + Args: + event_id: The event ID of the redaction event. + redacts: The event ID being redacted. + recheck_redaction: The value of `recheck_redaction` in internal metadata. + If None, the key is omitted from internal metadata. + insert_event_json: Whether to insert a corresponding row in `event_json`. 
+ """ + self.get_success( + self.db_pool.simple_insert( + table="redactions", + values={ + "event_id": event_id, + "redacts": redacts, + "have_censored": False, + "received_ts": 0, + }, + ) + ) + + if insert_event_json: + internal_metadata: dict = {} + if recheck_redaction is not None: + internal_metadata["recheck_redaction"] = recheck_redaction + + self.get_success( + self.db_pool.simple_insert( + table="event_json", + values={ + "event_id": event_id, + "room_id": "!room:test", + "internal_metadata": encode_canonical_json( + internal_metadata + ).decode("utf-8"), + "json": "{}", + "format_version": 3, + }, + ) + ) + + def _get_recheck(self, event_id: str) -> bool: + row = self.get_success( + self.db_pool.simple_select_one( + table="redactions", + keyvalues={"event_id": event_id}, + retcols=["recheck"], + ) + ) + return bool(row[0]) + + def test_recheck_true(self) -> None: + """A redaction with recheck_redaction=True in internal metadata gets recheck=True.""" + self._insert_redaction("$redact1:test", "$target1:test", recheck_redaction=True) + + self.wait_for_background_updates() + + self.assertTrue(self._get_recheck("$redact1:test")) + + def test_recheck_false(self) -> None: + """A redaction with recheck_redaction=False in internal metadata gets recheck=False.""" + self._insert_redaction( + "$redact2:test", "$target2:test", recheck_redaction=False + ) + + self.wait_for_background_updates() + + self.assertFalse(self._get_recheck("$redact2:test")) + + def test_recheck_absent_from_metadata(self) -> None: + """A redaction with no recheck_redaction key in internal metadata gets recheck=False.""" + self._insert_redaction("$redact3:test", "$target3:test", recheck_redaction=None) + + self.wait_for_background_updates() + + self.assertFalse(self._get_recheck("$redact3:test")) + + def test_recheck_no_event_json(self) -> None: + """A redaction with no event_json row gets recheck=False.""" + self._insert_redaction( + "$redact4:test", "$target4:test", insert_event_json=False + ) 
+ + self.wait_for_background_updates() + + self.assertFalse(self._get_recheck("$redact4:test")) + + def test_batching(self) -> None: + """The update processes rows in batches, completing when all are done.""" + self._insert_redaction("$redact5:test", "$target5:test", recheck_redaction=True) + self._insert_redaction( + "$redact6:test", "$target6:test", recheck_redaction=False + ) + self._insert_redaction("$redact7:test", "$target7:test", recheck_redaction=True) + + self.wait_for_background_updates() + + self.assertTrue(self._get_recheck("$redact5:test")) + self.assertFalse(self._get_recheck("$redact6:test")) + self.assertTrue(self._get_recheck("$redact7:test")) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 92eb99f1d5..c82ccf1600 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -158,7 +158,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): event, ) - self.assertFalse("redacted_because" in event.unsigned) + self.assertIsNone(event.internal_metadata.redacted_by) # Redact event reason = "Because I said so" @@ -168,7 +168,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): self.assertEqual(msg_event.event_id, event.event_id) - self.assertTrue("redacted_because" in event.unsigned) + self.assertIsNotNone(event.internal_metadata.redacted_by) self.assertObjectHasAttributes( { @@ -179,15 +179,6 @@ class RedactionTestCase(unittest.HomeserverTestCase): event, ) - self.assertObjectHasAttributes( - { - "type": EventTypes.Redaction, - "user_id": self.u_alice.to_string(), - "content": {"reason": reason}, - }, - event.unsigned["redacted_because"], - ) - def test_redact_join(self) -> None: self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) @@ -206,7 +197,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): event, ) - self.assertFalse(hasattr(event, "redacted_because")) + self.assertIsNone(event.internal_metadata.redacted_by) # Redact event reason = "Because I said so" @@ 
-216,7 +207,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): event = self.get_success(self.store.get_event(msg_event.event_id)) - self.assertTrue("redacted_because" in event.unsigned) + self.assertIsNotNone(event.internal_metadata.redacted_by) self.assertObjectHasAttributes( { @@ -227,15 +218,6 @@ class RedactionTestCase(unittest.HomeserverTestCase): event, ) - self.assertObjectHasAttributes( - { - "type": EventTypes.Redaction, - "user_id": self.u_alice.to_string(), - "content": {"reason": reason}, - }, - event.unsigned["redacted_because"], - ) - def test_circular_redaction(self) -> None: redaction_event_id1 = "$redaction1_id:test" redaction_event_id2 = "$redaction2_id:test" @@ -331,10 +313,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): fetched = self.get_success(self.store.get_event(redaction_event_id1)) # it should have been redacted - self.assertEqual(fetched.unsigned["redacted_by"], redaction_event_id2) - self.assertEqual( - fetched.unsigned["redacted_because"].event_id, redaction_event_id2 - ) + self.assertEqual(fetched.internal_metadata.redacted_by, redaction_event_id2) def test_redact_censor(self) -> None: """Test that a redacted event gets censored in the DB after a month""" @@ -355,7 +334,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): event, ) - self.assertFalse("redacted_because" in event.unsigned) + self.assertIsNone(event.internal_metadata.redacted_by) # Redact event reason = "Because I said so" @@ -363,7 +342,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): event = self.get_success(self.store.get_event(msg_event.event_id)) - self.assertTrue("redacted_because" in event.unsigned) + self.assertIsNotNone(event.internal_metadata.redacted_by) self.assertObjectHasAttributes( { diff --git a/tests/unittest.py b/tests/unittest.py index 6022c750d0..03db9a4282 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -697,8 +697,43 @@ class HomeserverTestCase(TestCase): def pump(self, by: float = 0.0) -> None: """ - Pump 
the reactor enough that Deferreds will fire. + XXX: Deprecated: This method is deprecated. Use `self.reactor.advance(...)` + directly instead. + + Pump the reactor enough that `clock.call_later` scheduled callbacks will fire. + + To demystify this function, it simply advances time by the number of seconds + specified (defaults to `0`, we also multiply by 100, so `pump(1)` is 100 seconds + in 1 second steps/increments) whilst calling any pending callbacks, allowing any + queued/pending tasks to run because enough time has passed. + + So for example, if you have some Synapse code that does + `clock.call_later(Duration(seconds=2), callback)`, then calling + `self.pump(by=0.02)` will advance time by 2 seconds, which is enough for that + callback to be ready to run now. Same for `clock.sleep(...)`, + `clock.looping_call(...)`, and any other clock utility that uses + `clock.call_later` under the hood for scheduling tasks. Trying to use + `pump(by=...)` with exact math to meet a specific deadline feels pretty dirty, + though, which is why we recommend using `self.reactor.advance(...)` directly + nowadays. + + We don't have any exact historical context for why `pump()` was introduced into + the codebase beyond the code itself. We assume that we multiply by 100 so that + when you use the clock to schedule something that schedules more things, it + tries to run the whole chain to completion. + + XXX: If you're having to call this function, please call out in comments which + scheduled thing you're aiming to trigger. Please also check whether the + `pump(...)` is even necessary as it was often misused. + + Args: + by: The time increment in seconds to advance time by. We will advance time + in 100 steps, each step by this value. """ + # We multiply by 100, so `pump(1)` actually advances time by 100 seconds in 1 + # second steps/increments.
We assume this was done so that when you use the + # clock to schedule something that schedules more things, it tries to run the + # whole chain to completion. self.reactor.pump([by] * 100) def get_success(self, d: Awaitable[TV], by: float = 0.0) -> TV: