diff --git a/.github/workflows/complement_tests.yml b/.github/workflows/complement_tests.yml index f98a14e710..bc0e82b667 100644 --- a/.github/workflows/complement_tests.yml +++ b/.github/workflows/complement_tests.yml @@ -55,6 +55,13 @@ jobs: toolchain: ${{ env.RUST_VERSION }} - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 + # We use `poetry` in `complement.sh` + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 + with: + poetry-version: "2.1.1" + # Matches the `path` where we checkout Synapse above + working-directory: "synapse" + - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh @@ -76,6 +83,32 @@ jobs: poetry lock working-directory: synapse + # Run the image sanity check test first as this is the first thing we want to know + # about (are we actually testing what we expect?) and we don't want to debug + # downstream failures (wild goose chase). + - name: Sanity check Complement image + id: run_sanity_check_complement_image_test + # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes + # are underpowered and don't like running tons of Synapse instances at once. + # -json: Output JSON format so that gotestfmt can parse it. + # + # tee /tmp/gotest-sanity-check-complement.log: We tee the output to a file so that we can re-process it + # later on for better formatting with gotestfmt. But we still want the command + # to output to the terminal as it runs so we can see what's happening in + # real-time. 
+ run: | + set -o pipefail + COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json -run 'TestSynapseVersion/Synapse_version_matches_current_git_checkout' 2>&1 | tee /tmp/gotest-sanity-check-complement.log + shell: bash + env: + POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }} + WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }} + + - name: Formatted sanity check Complement test logs + # Always run this step if we attempted to run the Complement tests. + if: always() && steps.run_sanity_check_complement_image_test.outcome != 'skipped' + run: cat /tmp/gotest-sanity-check-complement.log | gotestfmt -hide "successful-downloads,empty-packages" + - name: Run Complement Tests id: run_complement_tests # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes diff --git a/CHANGES.md b/CHANGES.md index 5996b7bcdf..1a5ff136c8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +# Synapse 1.149.1 (2026-03-11) + +## Internal Changes + +- Bump `matrix-synapse-ldap3` to `0.4.0` to support `setuptools>=82.0.0`. Fixes [\#19541](https://github.com/element-hq/synapse/issues/19541). ([\#19543](https://github.com/element-hq/synapse/issues/19543)) + + + + +# Synapse 1.149.0 (2026-03-10) + +No significant changes since 1.149.0rc1. + + + + # Synapse 1.149.0rc1 (2026-03-03) ## Features diff --git a/changelog.d/19476.misc b/changelog.d/19476.misc new file mode 100644 index 0000000000..c1869911a4 --- /dev/null +++ b/changelog.d/19476.misc @@ -0,0 +1 @@ +Add in-repo Complement test to sanity check Synapse version matches git checkout (testing what we think we are). diff --git a/changelog.d/19487.feature b/changelog.d/19487.feature new file mode 100644 index 0000000000..4eb1d8f261 --- /dev/null +++ b/changelog.d/19487.feature @@ -0,0 +1 @@ +Expose [MSC4354 Sticky Events](https://github.com/matrix-org/matrix-spec-proposals/pull/4354) over the legacy (v3) /sync API. 
\ No newline at end of file diff --git a/changelog.d/19491.misc b/changelog.d/19491.misc new file mode 100644 index 0000000000..62b0ddddc2 --- /dev/null +++ b/changelog.d/19491.misc @@ -0,0 +1 @@ +Remove the optional `systemd-python` dependency and the `systemd` extra on the `synapse` package. diff --git a/changelog.d/19542.bugfix b/changelog.d/19542.bugfix new file mode 100644 index 0000000000..ab72504335 --- /dev/null +++ b/changelog.d/19542.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.26.0 that caused deactivated, erased users to not be removed from the user directory. \ No newline at end of file diff --git a/complement/go.mod b/complement/go.mod index c6c1678bac..2a9adb9b95 100644 --- a/complement/go.mod +++ b/complement/go.mod @@ -31,6 +31,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 // indirect github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.2 // indirect diff --git a/complement/go.sum b/complement/go.sum index c674730c05..79a35aa14c 100644 --- a/complement/go.sum +++ b/complement/go.sum @@ -38,6 +38,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-set/v3 v3.0.0 h1:CaJBQvQCOWoftrBcDt7Nwgo0kdpmrKxar/x2o6pV9JA= 
github.com/hashicorp/go-set/v3 v3.0.0/go.mod h1:IEghM2MpE5IaNvL+D7X480dfNtxjRXZ6VMpK3C8s2ok= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -50,6 +52,8 @@ github.com/matrix-org/gomatrixserverlib v0.0.0-20250813150445-9f5070a65744 h1:5G github.com/matrix-org/gomatrixserverlib v0.0.0-20250813150445-9f5070a65744/go.mod h1:b6KVfDjXjA5Q7vhpOaMqIhFYvu5BuFVZixlNeTV/CLc= github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y= github.com/matrix-org/util v0.0.0-20221111132719-399730281e66/go.mod h1:iBI1foelCqA09JJgPV0FYz4qA5dUXYOxMi57FxKBdd4= +github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= +github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= @@ -120,6 +124,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -129,6 +135,8 @@ golang.org/x/net v0.47.0/go.mod 
h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -147,6 +155,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -160,6 +170,8 @@ google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3i google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod 
h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= +gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/complement/tests/synapse_version_check_test.go b/complement/tests/synapse_version_check_test.go new file mode 100644 index 0000000000..790a6a40ce --- /dev/null +++ b/complement/tests/synapse_version_check_test.go @@ -0,0 +1,101 @@ +// This file is licensed under the Affero General Public License (AGPL) version 3. +// +// Copyright (C) 2026 Element Creations Ltd +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// See the GNU Affero General Public License for more details: +// . + +package synapse_tests + +import ( + "net/http" + "os/exec" + "strings" + "testing" + + "github.com/matrix-org/complement" + "github.com/matrix-org/complement/match" + "github.com/matrix-org/complement/must" + "github.com/tidwall/gjson" +) + +func TestSynapseVersion(t *testing.T) { + deployment := complement.Deploy(t, 1) + defer deployment.Destroy(t) + + unauthedClient := deployment.UnauthenticatedClient(t, "hs1") + + // Sanity check that the version of Synapse used in the `COMPLEMENT_BASE_IMAGE` + // matches the same git commit we have checked out. 
This ensures that the image being + // used in Complement is the one that we just built locally with `complement.sh` + // instead of accidentally pulling in some remote one. + // + // This test is expected to pass if you use `complement.sh`. + // + // If this test fails, it probably means that Complement is using an image that + // doesn't encompass the changes you have checked out (unexpected). We want to yell + // loudly and point out what's wrong instead of silently letting your PRs pass + // without actually being tested. + t.Run("Synapse version matches current git checkout", func(t *testing.T) { + // Get the Synapse version details of the current git checkout + checkoutSynapseVersion := runCommand( + t, + []string{ + "poetry", + "run", + "python", + "-c", + "from synapse.util import SYNAPSE_VERSION; print(SYNAPSE_VERSION)", + }, + ) + + // Find the version details of the Synapse instance deployed from the Docker image + res := unauthedClient.MustDo(t, "GET", []string{"_matrix", "federation", "v1", "version"}) + body := must.MatchResponse(t, res, match.HTTPResponse{ + StatusCode: http.StatusOK, + JSON: []match.JSON{ + match.JSONKeyPresent("server"), + }, + }) + rawSynapseVersionString := gjson.GetBytes(body, "server.version").Str + t.Logf( + "Synapse version string from federation version endpoint: %s", + rawSynapseVersionString, + ) + + must.Equal( + t, + rawSynapseVersionString, + checkoutSynapseVersion, + "Synapse version in the checkout doesn't match the Synapse version that Complement is running. "+ + "If this test fails, it probably means that Complement is using an image that "+ + "doesn't encompass the changes you have checked out (unexpected). We want to yell "+ + "loudly and point out what's wrong instead of silently letting your PRs pass "+ + "without actually being tested.", + ) + }) +} + +// runCommand will run the given command and return the stdout (whitespace +// trimmed). 
+func runCommand(t *testing.T, commandPieces []string) string { + t.Helper() + + // Then run our actual command + cmd := exec.Command(commandPieces[0], commandPieces[1:]...) + output, err := cmd.Output() + if err != nil { + t.Fatalf( + "runCommand: failed to run command (%s): %v", + strings.Join(commandPieces, " "), + err, + ) + } + + return strings.TrimSpace(string(output)) +} diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md index d12d8ec37b..456b4e9a16 100644 --- a/contrib/systemd/README.md +++ b/contrib/systemd/README.md @@ -16,3 +16,9 @@ appropriate locations of your installation. 5. Start Synapse: `sudo systemctl start matrix-synapse` 6. Verify Synapse is running: `sudo systemctl status matrix-synapse` 7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse` + +## Logging + +If you use `contrib/systemd/log_config.yaml`, install `systemd-python` in the +same Python environment as Synapse. The config uses +`systemd.journal.JournalHandler`, which requires that package. diff --git a/contrib/systemd/log_config.yaml b/contrib/systemd/log_config.yaml index 22f67a50ce..4c29787647 100644 --- a/contrib/systemd/log_config.yaml +++ b/contrib/systemd/log_config.yaml @@ -1,5 +1,6 @@ version: 1 +# Requires the `systemd-python` package in Synapse's runtime environment. # In systemd's journal, loglevel is implicitly stored, so let's omit it # from the message text. formatters: diff --git a/debian/build_virtualenv b/debian/build_virtualenv index 9e7fb95c8e..70d4efcbd0 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -39,8 +39,10 @@ pip install poetry==2.1.1 poetry-plugin-export==1.9.0 poetry export \ --extras all \ --extras test \ - --extras systemd \ -o exported_requirements.txt +# Keep journald logging support in the packaged virtualenv when using +# systemd.journal.JournalHandler in /etc/matrix-synapse/log.yaml. 
+echo "systemd-python==235 --hash=sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a" >> exported_requirements.txt deactivate rm -rf "$TEMP_VENV" @@ -57,7 +59,7 @@ dh_virtualenv \ --extra-pip-arg="--no-deps" \ --extra-pip-arg="--no-cache-dir" \ --extra-pip-arg="--compile" \ - --extras="all,systemd,test" \ + --extras="all,test" \ --requirements="exported_requirements.txt" PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3" diff --git a/debian/changelog b/debian/changelog index 583bb35ad5..9cd39ee76c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,21 @@ +matrix-synapse-py3 (1.149.1+nmu1) UNRELEASED; urgency=medium + + * Change how the systemd journald integration is installed. + + -- Quentin Gliech Fri, 20 Feb 2026 19:19:51 +0100 + +matrix-synapse-py3 (1.149.1) stable; urgency=medium + + * New synapse release 1.149.1. + + -- Synapse Packaging team Wed, 11 Mar 2026 09:30:24 +0000 + +matrix-synapse-py3 (1.149.0) stable; urgency=medium + + * New synapse release 1.149.0. + + -- Synapse Packaging team Tue, 10 Mar 2026 13:02:53 +0000 + matrix-synapse-py3 (1.149.0~rc1) stable; urgency=medium * New synapse release 1.149.0rc1. diff --git a/docker/Dockerfile b/docker/Dockerfile index 9f74e48df3..59771ae88f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -171,6 +171,14 @@ FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION} ARG TARGETARCH +# If specified, Synapse will use this as the version string in the app. +# +# This can be useful to capture the git info of the build as `.git/` won't be +# available in the Docker image for Synapse to generate from. +ARG SYNAPSE_VERSION_STRING +# Pass it through to Synapse as an environment variable. 
+ENV SYNAPSE_VERSION_STRING=${SYNAPSE_VERSION_STRING} + LABEL org.opencontainers.image.url='https://github.com/element-hq/synapse' LABEL org.opencontainers.image.documentation='https://element-hq.github.io/synapse/latest/' LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git' diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 9e0a1cb70c..72e0e8d91a 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -403,6 +403,7 @@ is set to `true`: - Remove the user's display name - Remove the user's avatar URL +- Remove the user's custom profile fields - Mark the user as erased The following actions are **NOT** performed. The list may be incomplete. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index a48662362a..c887f9900c 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -229,6 +229,11 @@ pip install --upgrade setuptools pip install matrix-synapse ``` +If you want to use a logging configuration that references +`systemd.journal.JournalHandler` (for example `contrib/systemd/log_config.yaml`), +you must install `systemd-python` separately in the same environment. +Synapse no longer provides a `matrix-synapse[systemd]` extra. + This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse) and install it, along with the python libraries it uses, into a virtual environment under `~/synapse/env`. Feel free to pick a different directory if you diff --git a/docs/upgrade.md b/docs/upgrade.md index 1630c6ab40..777e57c492 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,35 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). 
+# Upgrading to v1.150.0 + +## Removal of the `systemd` pip extra + +The `matrix-synapse[systemd]` pip extra has been removed. +If you use `systemd.journal.JournalHandler` in your logging configuration +(e.g. `contrib/systemd/log_config.yaml`), you must now install +`systemd-python` manually in Synapse's runtime environment: + +```bash +pip install systemd-python +``` + +No action is needed if you do not use journal logging, or if you installed +Synapse from the Debian packages (which handle this automatically). + +## Module API: Deprecation of the `deactivation` parameter in the `set_displayname` method + +If you have Synapse modules installed that use the `set_displayname` method to change +the display name of your users, please ensure that it doesn't pass the optional +`deactivation` parameter. + +This parameter is now deprecated and it is intended to be removed in 2027. +No immediate change is necessary, however once the parameter is removed, modules passing it will produce errors. +[Issue #19546](https://github.com/element-hq/synapse/issues/19546) tracks this removal. + +From this version, when the parameter is passed, an error such as +``Deprecated `deactivation` parameter passed to `set_displayname` Module API (value: False). This will break in 2027.`` will be logged. The method will otherwise continue to work. + # Upgrading to v1.146.0 ## Drop support for Ubuntu 25.04 Plucky Puffin, and add support for 25.10 Questing Quokka diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md index 23a55abdcc..9cc96df73c 100644 --- a/docs/usage/configuration/logging_sample_config.md +++ b/docs/usage/configuration/logging_sample_config.md @@ -14,6 +14,9 @@ It should be named `.log.config` by default. Hint: If you're looking for a guide on what each of the fields in the "Processed request" log lines mean, see [Request log format](../administration/request_log.md). 
+If you use `systemd.journal.JournalHandler` in your own logging config, ensure +`systemd-python` is installed in Synapse's runtime environment. + ```yaml {{#include ../../sample_log_config.yaml}} ``` diff --git a/poetry.lock b/poetry.lock index 7f9b34d38f..0d03c860f5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1548,24 +1548,25 @@ test = ["aiounittest", "tox", "twisted"] [[package]] name = "matrix-synapse-ldap3" -version = "0.3.0" +version = "0.4.0" description = "An LDAP3 auth provider for Synapse" optional = true -python-versions = ">=3.7" +python-versions = ">=3.10" groups = ["main"] markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ - {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, - {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, + {file = "matrix_synapse_ldap3-0.4.0-py3-none-any.whl", hash = "sha256:bf080037230d2af5fd3639cb87266de65c1cad7a68ea206278c5b4bf9c1a17f3"}, + {file = "matrix_synapse_ldap3-0.4.0.tar.gz", hash = "sha256:cff52ba780170de5e6e8af42863d2648ee23f3bf0a9fea6db52372f9fc00be2b"}, ] [package.dependencies] ldap3 = ">=2.8" -service-identity = "*" +packaging = ">=14.1" +service_identity = "*" Twisted = ">=15.1.0" [package.extras] -dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"] +dev = ["black (==24.2.0)", "flake8 (==7.0.0)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==1.9.0)", "tox", "types-setuptools"] [[package]] name = "mdurl" @@ -3128,18 +3129,6 @@ c = ["sqlglotc"] dev = ["duckdb (>=0.6)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "pyperf", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing_extensions"] rs = ["sqlglotrs (==0.13.0)"] -[[package]] -name = "systemd-python" -version = "235" 
-description = "Python interface for libsystemd" -optional = true -python-versions = "*" -groups = ["main"] -markers = "extra == \"systemd\"" -files = [ - {file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"}, -] - [[package]] name = "threadloop" version = "1.0.2" @@ -3761,11 +3750,10 @@ postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] redis = ["hiredis", "txredisapi"] saml2 = ["defusedxml", "pysaml2", "pytz"] sentry = ["sentry-sdk"] -systemd = ["systemd-python"] test = ["idna", "parameterized"] url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = ">=3.10.0,<4.0.0" -content-hash = "1caa5072f6304122c89377420f993a54f54587f3618ccc8094ec31642264592c" +content-hash = "dd63614889e7e181fca33760741a490e65fe4ef4f42756cafd0f804ae7324916" diff --git a/pyproject.toml b/pyproject.toml index 9d5e3fec2d..07ab1ee5db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "matrix-synapse" -version = "1.149.0rc1" +version = "1.149.1" description = "Homeserver for the Matrix decentralised comms protocol" readme = "README.rst" authors = [ @@ -135,10 +135,6 @@ saml2 = [ "pytz>=2018.3", # via pysaml2 ] oidc = ["authlib>=0.15.1"] -# systemd-python is necessary for logging to the systemd journal via -# `systemd.journal.JournalHandler`, as is documented in -# `contrib/systemd/log_config.yaml`. -systemd = ["systemd-python>=231"] url-preview = ["lxml>=4.6.3"] sentry = ["sentry-sdk>=0.7.2"] opentracing = [ @@ -194,7 +190,6 @@ all = [ "pympler>=1.0", # omitted: # - test: it's useful to have this separate from dev deps in the olddeps job - # - systemd: this is a system-based requirement # Transitive dependencies # These dependencies aren't directly required by Synapse. 
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index fec005fdb1..c65ae53df0 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -215,11 +215,17 @@ main() { $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' "$COMPLEMENT_SYNAPSE_EDITABLE_IMAGE_PATH" -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so else + # We remove the `egg-info` as it can contain outdated information which won't line + # up with our current reality. + rm -rf matrix_synapse.egg-info/ + # Figure out the Synapse version string in our current checkout + synapse_version_string="$(poetry run python -c 'from synapse.util import SYNAPSE_VERSION; print(SYNAPSE_VERSION)')" # Build the base Synapse image from the local checkout echo_if_github "::group::Build Docker image: matrixdotorg/synapse" $CONTAINER_RUNTIME build \ -t "$SYNAPSE_IMAGE_PATH" \ + --build-arg SYNAPSE_VERSION_STRING="$synapse_version_string" \ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ -f "docker/Dockerfile" . 
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index e4c646ce87..538bdaaaf8 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -167,13 +167,13 @@ class DeactivateAccountHandler: # Mark the user as erased, if they asked for that if erase_data: user = UserID.from_string(user_id) - # Remove avatar URL from this user - await self._profile_handler.set_avatar_url( - user, requester, "", by_admin, deactivation=True - ) - # Remove displayname from this user - await self._profile_handler.set_displayname( - user, requester, "", by_admin, deactivation=True + # Remove displayname, avatar URL and custom profile fields from this user + # + # Note that displayname and avatar URL may persist as historical state events + # in rooms, but these cases behave like message history, following + # https://spec.matrix.org/v1.17/client-server-api/#post_matrixclientv3accountdeactivate + await self._profile_handler.delete_profile_upon_deactivation( + user, requester, by_admin ) logger.info("Marking %s as erased", user_id) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6bec901a79..d123bcdd36 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia +from synapse.storage.roommember import ProfileInfo from synapse.types import ( JsonDict, JsonMapping, @@ -193,18 +194,24 @@ class ProfileHandler: target_user: UserID, requester: Requester, new_displayname: str, + *, by_admin: bool = False, - deactivation: bool = False, propagate: bool = True, ) -> None: """Set the displayname of a user + Preconditions: + - This must NOT be called as part of deactivating the user, because we will + notify modules about the change whilst claiming it is not related + to user deactivation and we will also (if `propagate=True`) send 
+ updates into rooms, which could cause rooms to be accidentally joined + after the deactivated user has left them. + Args: target_user: the user whose displayname is to be changed. requester: The user attempting to make this change. new_displayname: The displayname to give this user. by_admin: Whether this change was made by an administrator. - deactivation: Whether this change was made while deactivating the user. propagate: Whether this change also applies to the user's membership events. """ if not self.hs.is_mine(target_user): @@ -248,12 +255,13 @@ class ProfileHandler: await self.store.set_profile_displayname(target_user, displayname_to_set) profile = await self.store.get_profileinfo(target_user) + await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) await self._third_party_rules.on_profile_update( - target_user.to_string(), profile, by_admin, deactivation + target_user.to_string(), profile, by_admin, deactivation=False ) if propagate: @@ -297,18 +305,24 @@ class ProfileHandler: target_user: UserID, requester: Requester, new_avatar_url: str, + *, by_admin: bool = False, - deactivation: bool = False, propagate: bool = True, ) -> None: """Set a new avatar URL for a user. + Preconditions: + - This must NOT be called as part of deactivating the user, because we will + notify modules about the change whilst claiming it is not related + to user deactivation and we will also (if `propagate=True`) send + updates into rooms, which could cause rooms to be accidentally joined + after the deactivated user has left them. + Args: target_user: the user whose avatar URL is to be changed. requester: The user attempting to make this change. new_avatar_url: The avatar URL to give this user. by_admin: Whether this change was made by an administrator. - deactivation: Whether this change was made while deactivating the user. propagate: Whether this change also applies to the user's membership events. 
""" if not self.hs.is_mine(target_user): @@ -350,17 +364,79 @@ class ProfileHandler: await self.store.set_profile_avatar_url(target_user, avatar_url_to_set) profile = await self.store.get_profileinfo(target_user) + await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) await self._third_party_rules.on_profile_update( - target_user.to_string(), profile, by_admin, deactivation + target_user.to_string(), profile, by_admin, deactivation=False ) if propagate: await self._update_join_states(requester, target_user) + async def delete_profile_upon_deactivation( + self, + target_user: UserID, + requester: Requester, + by_admin: bool = False, + ) -> None: + """ + Clear the user's profile upon user deactivation (specifically, when user erasure is needed). + + This includes the displayname, avatar_url, all custom profile fields. + + The user directory is NOT updated in any way; it is the caller's responsibility to remove + the user from the user directory. + + Rooms' join states are NOT updated in any way; it is the caller's responsibility to soon + **leave** the room on the user's behalf, so there's no point sending new join events into + rooms to propagate the profile deletion. + See the `users_pending_deactivation` table and the associated user parter loop. + """ + if not self.hs.is_mine(target_user): + raise SynapseError(400, "User is not hosted on this homeserver") + + # Prevent users from deactivating anyone but themselves, + # except for admins who can deactivate anyone. + if not by_admin and target_user != requester.user: + # It's a little strange to have this check here, but given all the sibling + # methods have these checks, it'd be even stranger to be inconsistent and not + # have it. 
+ raise AuthError(400, "Cannot remove another user's profile") + + if not by_admin: + current_profile = await self.store.get_profileinfo(target_user) + if not self.hs.config.registration.enable_set_displayname: + if current_profile.display_name: + # SUSPICIOUS: It seems strange to block deactivation on this, + # though this is preserving previous behaviour. + raise SynapseError( + 400, + "Changing display name is disabled on this server", + Codes.FORBIDDEN, + ) + + if not self.hs.config.registration.enable_set_avatar_url: + if current_profile.avatar_url: + # SUSPICIOUS: It seems strange to block deactivation on this, + # though this is preserving previous behaviour. + raise SynapseError( + 400, + "Changing avatar is disabled on this server", + Codes.FORBIDDEN, + ) + + await self.store.delete_profile(target_user) + + await self._third_party_rules.on_profile_update( + target_user.to_string(), + ProfileInfo(None, None), + by_admin, + deactivation=True, + ) + @cached() async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: """Check that the size and content type of the avatar at the given MXC URI are @@ -482,18 +558,22 @@ class ProfileHandler: requester: Requester, field_name: str, new_value: JsonValue, + *, by_admin: bool = False, - deactivation: bool = False, ) -> None: """Set a new profile field for a user. + Preconditions: + - This must NOT be called as part of deactivating the user, because we will + notify modules about the change whilst claiming it is not related + to user deactivation. + Args: target_user: the user whose profile is to be changed. requester: The user attempting to make this change. field_name: The name of the profile field to update. new_value: The new field value for this user. by_admin: Whether this change was made by an administrator. - deactivation: Whether this change was made while deactivating the user. 
""" if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -506,7 +586,7 @@ class ProfileHandler: # Custom fields do not propagate into the user directory *or* rooms. profile = await self.store.get_profileinfo(target_user) await self._third_party_rules.on_profile_update( - target_user.to_string(), profile, by_admin, deactivation + target_user.to_string(), profile, by_admin, deactivation=False ) async def delete_profile_field( @@ -514,17 +594,21 @@ class ProfileHandler: target_user: UserID, requester: Requester, field_name: str, + *, by_admin: bool = False, - deactivation: bool = False, ) -> None: """Delete a field from a user's profile. + Preconditions: + - This must NOT be called as part of deactivating the user, because we will + notify modules about the change whilst claiming it is not related + to user deactivation. + Args: target_user: the user whose profile is to be changed. requester: The user attempting to make this change. field_name: The name of the profile field to remove. by_admin: Whether this change was made by an administrator. - deactivation: Whether this change was made while deactivating the user. """ if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -537,7 +621,7 @@ class ProfileHandler: # Custom fields do not propagate into the user directory *or* rooms. 
profile = await self.store.get_profileinfo(target_user) await self._third_party_rules.on_profile_update( - target_user.to_string(), profile, by_admin, deactivation + target_user.to_string(), profile, by_admin, deactivation=False ) async def on_profile_query(self, args: JsonDict) -> JsonDict: diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index ebbe7afa84..0b2587f94f 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -522,7 +522,10 @@ class SsoHandler: authenticated_entity=user_id, ) await self._profile_handler.set_displayname( - user_id_obj, requester, attributes.display_name, True + user_id_obj, + requester, + attributes.display_name, + by_admin=True, ) if attributes.picture: await self.set_avatar(user_id, attributes.picture) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a32748c2a9..c8ef5e2aa6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -37,6 +37,7 @@ from synapse.api.constants import ( EventContentFields, EventTypes, Membership, + StickyEvent, ) from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState @@ -147,6 +148,7 @@ class JoinedSyncResult: state: StateMap[EventBase] ephemeral: list[JsonDict] account_data: list[JsonDict] + sticky: list[EventBase] unread_notifications: JsonDict unread_thread_notifications: JsonDict summary: JsonDict | None @@ -157,7 +159,11 @@ class JoinedSyncResult: to tell if room needs to be part of the sync result. """ return bool( - self.timeline or self.state or self.ephemeral or self.account_data + self.timeline + or self.state + or self.ephemeral + or self.account_data + or self.sticky # nb the notification count does not, er, count: if there's nothing # else in the result, we don't need to send it. 
         )
@@ -601,6 +607,41 @@ class SyncHandler:
 
         return now_token, ephemeral_by_room
 
+    async def sticky_events_by_room(
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        now_token: StreamToken,
+        since_token: StreamToken | None = None,
+    ) -> tuple[StreamToken, dict[str, list[str]]]:
+        """Get the sticky events for each room the user is in
+        Args:
+            sync_result_builder
+            now_token: Where the server is currently up to.
+            since_token: Where the server was when the client last synced.
+        Returns:
+            A tuple of the now StreamToken, updated to reflect which sticky
+            events are included, and a dict mapping from room_id to a list
+            of sticky event IDs for that room (in sticky event stream order).
+        """
+        now = self.clock.time_msec()
+        with Measure(
+            self.clock, name="sticky_events_by_room", server_name=self.server_name
+        ):
+            from_id = since_token.sticky_events_key if since_token else 0
+
+            room_ids = sync_result_builder.joined_room_ids
+
+            to_id, sticky_by_room = await self.store.get_sticky_events_in_rooms(
+                room_ids,
+                from_id=from_id,
+                to_id=now_token.sticky_events_key,
+                now=now,
+                limit=StickyEvent.MAX_EVENTS_IN_SYNC,
+            )
+            now_token = now_token.copy_and_replace(StreamKeyType.STICKY_EVENTS, to_id)
+
+            return now_token, sticky_by_room
+
     async def _load_filtered_recents(
         self,
         room_id: str,
@@ -2177,11 +2218,43 @@ class SyncHandler:
         )
         sync_result_builder.now_token = now_token
 
+        sticky_by_room: dict[str, list[str]] = {}
+        if self.hs_config.experimental.msc4354_enabled:
+            now_token, sticky_by_room = await self.sticky_events_by_room(
+                sync_result_builder, now_token, since_token
+            )
+            sync_result_builder.now_token = now_token
+
         # 2. We check up front if anything has changed, if it hasn't then there is
         # no point in going further.
+        #
+        # If this is an initial sync (no since_token), then of course we can't skip
+        # the sync entry, as we have no base to use as a comparison for the question
+        # 'has anything changed' (this is the client's first time 'seeing' anything).
+ # + # Otherwise, for incremental syncs, we consider skipping the sync entry, + # doing cheap checks first: + # + # - are there any per-room EDUs; + # - is there any Room Account Data; or + # - are there any sticky events in the rooms; or + # - might the rooms have changed + # (using in-memory event stream change caches, which can + # only answer either 'Not changed' or 'Possibly changed') + # + # If none of those cheap checks give us a reason to continue generating the sync entry, + # we finally query the database to check for changed room tags. + # If there are also no changed tags, we can short-circuit return an empty sync entry. if not sync_result_builder.full_state: - if since_token and not ephemeral_by_room and not account_data_by_room: - have_changed = await self._have_rooms_changed(sync_result_builder) + # Cheap checks first + if ( + since_token + and not ephemeral_by_room + and not account_data_by_room + and not sticky_by_room + ): + # This is also a cheap check, but we log the answer + have_changed = self._may_have_rooms_changed(sync_result_builder) log_kv({"rooms_have_changed": have_changed}) if not have_changed: tags_by_room = await self.store.get_updated_tags( @@ -2225,6 +2298,7 @@ class SyncHandler: ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), account_data=account_data_by_room.get(room_entry.room_id, {}), + sticky_event_ids=sticky_by_room.get(room_entry.room_id, []), always_include=sync_result_builder.full_state, ) logger.debug("Generated room entry for %s", room_entry.room_id) @@ -2237,11 +2311,9 @@ class SyncHandler: return set(newly_joined_rooms), set(newly_left_rooms) - async def _have_rooms_changed( - self, sync_result_builder: "SyncResultBuilder" - ) -> bool: + def _may_have_rooms_changed(self, sync_result_builder: "SyncResultBuilder") -> bool: """Returns whether there may be any new events that should be sent down - the sync. Returns True if there are. + the sync. 
Returns True if there **may** be. Does not modify the `sync_result_builder`. """ @@ -2611,6 +2683,7 @@ class SyncHandler: ephemeral: list[JsonDict], tags: Mapping[str, JsonMapping] | None, account_data: Mapping[str, JsonMapping], + sticky_event_ids: list[str], always_include: bool = False, ) -> None: """Populates the `joined` and `archived` section of `sync_result_builder` @@ -2640,6 +2713,8 @@ class SyncHandler: tags: List of *all* tags for room, or None if there has been no change. account_data: List of new account data for room + sticky_event_ids: MSC4354 sticky events in the room, if any. + In sticky event stream order. always_include: Always include this room in the sync response, even if empty. """ @@ -2650,7 +2725,13 @@ class SyncHandler: events = room_builder.events # We want to shortcut out as early as possible. - if not (always_include or account_data or ephemeral or full_state): + if not ( + always_include + or account_data + or ephemeral + or full_state + or sticky_event_ids + ): if events == [] and tags is None: return @@ -2742,6 +2823,7 @@ class SyncHandler: or account_data_events or ephemeral or full_state + or sticky_event_ids ): return @@ -2788,6 +2870,32 @@ class SyncHandler: if room_builder.rtype == "joined": unread_notifications: dict[str, int] = {} + sticky_events: list[EventBase] = [] + if sticky_event_ids: + # As per MSC4354: + # Remove sticky events that are already in the timeline, else we will needlessly duplicate + # events. + # There is no purpose in including sticky events in the sticky section if they're already in + # the timeline, as either way the client becomes aware of them. + # This is particularly important given the risk of sticky events spam since + # anyone can send sticky events, so halving the bandwidth on average for each sticky + # event is helpful. 
+ timeline_event_id_set = {ev.event_id for ev in batch.events} + # Must preserve sticky event stream order + sticky_event_ids = [ + e for e in sticky_event_ids if e not in timeline_event_id_set + ] + if sticky_event_ids: + # Fetch and filter the sticky events + sticky_events = await filter_and_transform_events_for_client( + self._storage_controllers, + sync_result_builder.sync_config.user.to_string(), + await self.store.get_events_as_list(sticky_event_ids), + # As per MSC4354: + # > History visibility checks MUST NOT be applied to sticky events. + # > Any joined user is authorised to see sticky events for the duration they remain sticky. + always_include_ids=frozenset(sticky_event_ids), + ) room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -2798,6 +2906,7 @@ class SyncHandler: unread_thread_notifications={}, summary=summary, unread_count=0, + sticky=sticky_events, ) if room_sync or always_include: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0580f3665c..947be24d3e 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -160,6 +160,7 @@ from synapse.util.caches.descriptors import CachedFunction, cached as _cached from synapse.util.clock import Clock from synapse.util.duration import Duration from synapse.util.frozenutils import freeze +from synapse.util.sentinel import Sentinel if TYPE_CHECKING: # Old versions don't have `LiteralString` @@ -1989,27 +1990,47 @@ class ModuleApi: self, user_id: UserID, new_displayname: str, - deactivation: bool = False, + deactivation: bool | Sentinel = Sentinel.UNSET_SENTINEL, ) -> None: """Sets a user's display name. Added in Synapse v1.76.0. + (Synapse Developer note: All future arguments should be kwargs-only + due to https://github.com/element-hq/synapse/issues/19546) + Args: user_id: The user whose display name is to be changed. new_displayname: The new display name to give the user. 
deactivation: + **deprecated since v1.150.0** + Callers should NOT pass this argument. Instead, omit it and leave it to the default. + Will log an error if it is passed. + Remove after 2027-01-01 + Tracked by https://github.com/element-hq/synapse/issues/19546 + Whether this change was made while deactivating the user. + + Should be omitted, will produce a logged error if set to True. + It's likely that this flag should have stayed internal-only and + was accidentally exposed to the Module API. + It no longer has any function. """ requester = create_requester(user_id) + + if deactivation is not Sentinel.UNSET_SENTINEL: + logger.error( + "Deprecated `deactivation` parameter passed to `set_displayname` Module API (value: %r). This will break in 2027.", + deactivation, + ) + await self._hs.get_profile_handler().set_displayname( target_user=user_id, requester=requester, new_displayname=new_displayname, by_admin=True, - deactivation=deactivation, ) def get_current_time_msec(self) -> int: diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 807a9cad5b..8265c2d789 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -369,7 +369,7 @@ class UserRestServletV2(UserRestServletV2Get): if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( - target_user, requester, body["displayname"], True + target_user, requester, body["displayname"], by_admin=True ) if threepids is not None: @@ -418,7 +418,7 @@ class UserRestServletV2(UserRestServletV2Get): if "avatar_url" in body: await self.profile_handler.set_avatar_url( - target_user, requester, body["avatar_url"], True + target_user, requester, body["avatar_url"], by_admin=True ) if "admin" in body: @@ -526,7 +526,7 @@ class UserRestServletV2(UserRestServletV2Get): if "avatar_url" in body and isinstance(body["avatar_url"], str): await self.profile_handler.set_avatar_url( - target_user, requester, body["avatar_url"], True + target_user, requester, 
body["avatar_url"], by_admin=True ) user_info_dict = await self.admin_handler.get_user(target_user) diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py index 7f3128cb61..c2ec5b3611 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -206,15 +206,15 @@ class ProfileFieldRestServlet(RestServlet): if field_name == ProfileFields.DISPLAYNAME: await self.profile_handler.set_displayname( - user, requester, new_value, is_admin, propagate=propagate + user, requester, new_value, by_admin=is_admin, propagate=propagate ) elif field_name == ProfileFields.AVATAR_URL: await self.profile_handler.set_avatar_url( - user, requester, new_value, is_admin, propagate=propagate + user, requester, new_value, by_admin=is_admin, propagate=propagate ) else: await self.profile_handler.set_profile_field( - user, requester, field_name, new_value, is_admin + user, requester, field_name, new_value, by_admin=is_admin ) return 200, {} @@ -263,15 +263,15 @@ class ProfileFieldRestServlet(RestServlet): if field_name == ProfileFields.DISPLAYNAME: await self.profile_handler.set_displayname( - user, requester, "", is_admin, propagate=propagate + user, requester, "", by_admin=is_admin, propagate=propagate ) elif field_name == ProfileFields.AVATAR_URL: await self.profile_handler.set_avatar_url( - user, requester, "", is_admin, propagate=propagate + user, requester, "", by_admin=is_admin, propagate=propagate ) else: await self.profile_handler.delete_profile_field( - user, requester, field_name, is_admin + user, requester, field_name, by_admin=is_admin ) return 200, {} diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 91f2f16771..710d097eab 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -619,6 +619,14 @@ class SyncRestServlet(RestServlet): ephemeral_events = room.ephemeral result["ephemeral"] = {"events": ephemeral_events} result["unread_notifications"] = room.unread_notifications + if 
room.sticky: + # The sticky events have already been deduplicated so that events + # appearing in the timeline won't appear again here + result["msc4354_sticky"] = { + "events": await self._event_serializer.serialize_events( + room.sticky, time_now, config=serialize_options + ) + } if room.unread_thread_notifications: result["unread_thread_notifications"] = room.unread_thread_notifications if self._msc3773_enabled: diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 11ad516eb3..9b787e19a3 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -534,6 +534,18 @@ class ProfileWorkerStore(SQLBaseStore): await self.db_pool.runInteraction("delete_profile_field", delete_profile_field) + async def delete_profile(self, user_id: UserID) -> None: + """ + Deletes an entire user profile, including displayname, avatar_url and all custom fields. + Used at user deactivation when erasure is requested. 
+ """ + + await self.db_pool.simple_delete( + desc="delete_profile", + table="profiles", + keyvalues={"full_user_id": user_id.to_string()}, + ) + class ProfileStore(ProfileWorkerStore): pass diff --git a/synapse/storage/databases/main/sticky_events.py b/synapse/storage/databases/main/sticky_events.py index 101306296e..38b84443df 100644 --- a/synapse/storage/databases/main/sticky_events.py +++ b/synapse/storage/databases/main/sticky_events.py @@ -13,9 +13,7 @@ import logging import random from dataclasses import dataclass -from typing import ( - TYPE_CHECKING, -) +from typing import TYPE_CHECKING, Collection, cast from twisted.internet.defer import Deferred @@ -25,6 +23,7 @@ from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_in_list_sql_clause, ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore @@ -138,6 +137,98 @@ class StickyEventsWorkerStore(StateGroupWorkerStore, CacheInvalidationWorkerStor def get_sticky_events_stream_id_generator(self) -> MultiWriterIdGenerator: return self._sticky_events_id_gen + async def get_sticky_events_in_rooms( + self, + room_ids: Collection[str], + *, + from_id: int, + to_id: int, + now: int, + limit: int | None, + ) -> tuple[int, dict[str, list[str]]]: + """ + Fetch all the sticky events' IDs in the given rooms, with sticky stream IDs satisfying + from_id < sticky stream ID <= to_id. + + The events are returned ordered by the sticky events stream. + + Args: + room_ids: The room IDs to return sticky events in. + from_id: The sticky stream ID that sticky events should be returned from (exclusive). + to_id: The sticky stream ID that sticky events should end at (inclusive). + now: The current time in unix millis, used for skipping expired events. + limit: Max sticky events to return, or None to apply no limit. 
+ Returns: + to_id, dict[room_id, list[event_ids]] + """ + sticky_events_rows = await self.db_pool.runInteraction( + "get_sticky_events_in_rooms", + self._get_sticky_events_in_rooms_txn, + room_ids, + from_id=from_id, + to_id=to_id, + now=now, + limit=limit, + ) + + if not sticky_events_rows: + return to_id, {} + + # Get stream_id of the last row, which is the highest + new_to_id, _, _ = sticky_events_rows[-1] + + # room ID -> event IDs + room_id_to_event_ids: dict[str, list[str]] = {} + for _, room_id, event_id in sticky_events_rows: + events = room_id_to_event_ids.setdefault(room_id, []) + events.append(event_id) + + return (new_to_id, room_id_to_event_ids) + + def _get_sticky_events_in_rooms_txn( + self, + txn: LoggingTransaction, + room_ids: Collection[str], + *, + from_id: int, + to_id: int, + now: int, + limit: int | None, + ) -> list[tuple[int, str, str]]: + if len(room_ids) == 0: + return [] + room_id_in_list_clause, room_id_in_list_values = make_in_list_sql_clause( + txn.database_engine, "se.room_id", room_ids + ) + limit_clause = "" + limit_params: tuple[int, ...] = () + if limit is not None: + limit_clause = "LIMIT ?" + limit_params = (limit,) + + if isinstance(self.database_engine, PostgresEngine): + expr_soft_failed = "COALESCE(((ej.internal_metadata::jsonb)->>'soft_failed')::boolean, FALSE)" + else: + expr_soft_failed = "COALESCE(ej.internal_metadata->>'soft_failed', FALSE)" + + txn.execute( + f""" + SELECT se.stream_id, se.room_id, event_id + FROM sticky_events se + INNER JOIN event_json ej USING (event_id) + WHERE + NOT {expr_soft_failed} + AND ? < expires_at + AND ? < stream_id + AND stream_id <= ? 
+ AND {room_id_in_list_clause} + ORDER BY stream_id ASC + {limit_clause} + """, + (now, from_id, to_id, *room_id_in_list_values, *limit_params), + ) + return cast(list[tuple[int, str, str]], txn.fetchall()) + async def get_updated_sticky_events( self, *, from_id: int, to_id: int, limit: int ) -> list[StickyEventUpdate]: diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index fbd01914d5..f2898de0b8 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -21,6 +21,7 @@ import collections.abc import logging +import os import typing from typing import ( Iterator, @@ -74,9 +75,15 @@ def log_failure( return None -# Version string with git info. Computed here once so that we don't invoke git multiple -# times. -SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__) +SYNAPSE_VERSION = os.getenv( + "SYNAPSE_VERSION_STRING" +) or get_distribution_version_string("matrix-synapse", __file__) +""" +Version string with git info. + +This can be overridden via the `SYNAPSE_VERSION_STRING` environment variable or is +computed here once so that we don't invoke git multiple times. +""" class ExceptionBundle(Exception): diff --git a/tests/rest/client/test_sync_sticky_events.py b/tests/rest/client/test_sync_sticky_events.py new file mode 100644 index 0000000000..7a38debdb9 --- /dev/null +++ b/tests/rest/client/test_sync_sticky_events.py @@ -0,0 +1,529 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2026, Element Creations Ltd. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+import json +import sqlite3 +from dataclasses import dataclass +from unittest.mock import patch +from urllib.parse import quote + +from twisted.internet.testing import MemoryReactor + +from synapse.api.constants import EventTypes, EventUnsignedContentFields, StickyEvent +from synapse.rest import admin +from synapse.rest.client import account_data, login, register, room, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util.clock import Clock +from synapse.util.duration import Duration + +from tests import unittest +from tests.utils import USE_POSTGRES_FOR_TESTS + + +class SyncStickyEventsTestCase(unittest.HomeserverTestCase): + """ + Tests for oldschool (v3) /sync with sticky events (MSC4354) + """ + + if not USE_POSTGRES_FOR_TESTS and sqlite3.sqlite_version_info < (3, 40, 0): + # We need the JSON functionality in SQLite + skip = f"SQLite version is too old to support sticky events: {sqlite3.sqlite_version_info} (See https://github.com/element-hq/synapse/issues/19428)" + + servlets = [ + room.register_servlets, + login.register_servlets, + register.register_servlets, + admin.register_servlets, + sync.register_servlets, + account_data.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + config["experimental_features"] = {"msc4354_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # Register an account + self.user_id = self.register_user("user1", "pass") + self.token = self.login(self.user_id, "pass") + + # Create a room + self.room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + def test_single_sticky_event_appears_in_initial_sync(self) -> None: + """ + Test sending a single sticky event and then doing an initial /sync. 
+ """ + + # Send a sticky event + sticky_event_response = self.helper.send_sticky_event( + self.room_id, + EventTypes.Message, + duration=Duration(minutes=1), + content={"body": "sticky message", "msgtype": "m.text"}, + tok=self.token, + ) + sticky_event_id = sticky_event_response["event_id"] + + # Perform initial sync + channel = self.make_request( + "GET", + "/sync", + access_token=self.token, + ) + + self.assertEqual(channel.code, 200, channel.result) + + # Get timeline events from the sync response + timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][ + "events" + ] + + # Verify the sticky event is present and has the sticky TTL field + self.assertEqual( + timeline_events[-1]["event_id"], + sticky_event_id, + f"Sticky event {sticky_event_id} not found in sync timeline", + ) + self.assertEqual( + timeline_events[-1]["unsigned"][EventUnsignedContentFields.STICKY_TTL], + # The other 100 ms is advanced in FakeChannel.await_result. + 59_900, + ) + + self.assertNotIn( + "sticky", + channel.json_body["rooms"]["join"][self.room_id], + "Unexpected sticky section of sync response (sticky event should be deduplicated)", + ) + + def test_sticky_event_beyond_timeline_in_initial_sync(self) -> None: + """ + Test that when sending a sticky event which is subsequently + pushed out of the timeline window by other messages, + the sticky event comes down the dedicated sticky section of the sync response. + """ + # Send the first sticky event + first_sticky_response = self.helper.send_sticky_event( + self.room_id, + EventTypes.Message, + duration=Duration(minutes=1), + content={"body": "first sticky", "msgtype": "m.text"}, + tok=self.token, + ) + first_sticky_event_id = first_sticky_response["event_id"] + + # Send 10 regular timeline events, + # in order to push the sticky event out of the timeline window + # that the /sync will get. 
+        regular_event_ids = []
+        for i in range(10):
+            # (Note: each one advances time by 100ms)
+            response = self.helper.send(
+                room_id=self.room_id,
+                body=f"regular message {i}",
+                tok=self.token,
+            )
+            regular_event_ids.append(response["event_id"])
+
+        # Send another sticky event
+        # (Note: this advances time by 100ms)
+        second_sticky_response = self.helper.send_sticky_event(
+            self.room_id,
+            EventTypes.Message,
+            duration=Duration(minutes=1),
+            content={"body": "second sticky", "msgtype": "m.text"},
+            tok=self.token,
+        )
+        second_sticky_event_id = second_sticky_response["event_id"]
+
+        # Perform initial sync
+        channel = self.make_request(
+            "GET",
+            "/sync",
+            access_token=self.token,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Get timeline events from the sync response
+        timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][
+            "events"
+        ]
+        timeline_event_ids = [event["event_id"] for event in timeline_events]
+        sticky_events = channel.json_body["rooms"]["join"][self.room_id][
+            "msc4354_sticky"
+        ]["events"]
+
+        # This is a canary to check the test setup is valid and that we're actually excluding the sticky event
+        # because it's outside the timeline window, not for some other potential reason.
+ self.assertNotIn( + regular_event_ids[0], + timeline_event_ids, + f"First regular event {regular_event_ids[0]} unexpectedly found in sync timeline (this means our test is invalid)", + ) + + # Assertions for the first sticky event: should be only in sticky section + self.assertNotIn( + first_sticky_event_id, + timeline_event_ids, + f"First sticky event {first_sticky_event_id} unexpectedly found in sync timeline (expected it to be outside the timeline window)", + ) + self.assertEqual( + len(sticky_events), + 1, + f"Expected exactly 1 item in sticky events section, got {sticky_events}", + ) + self.assertEqual(sticky_events[0]["event_id"], first_sticky_event_id) + self.assertEqual( + # The 'missing' 1100 ms were elapsed when sending events + sticky_events[0]["unsigned"][EventUnsignedContentFields.STICKY_TTL], + 58_800, + ) + + # Assertions for the second sticky event: should be only in timeline section + self.assertEqual( + timeline_events[-1]["event_id"], + second_sticky_event_id, + f"Second sticky event {second_sticky_event_id} not found in sync timeline", + ) + self.assertEqual( + timeline_events[-1]["unsigned"][EventUnsignedContentFields.STICKY_TTL], + # The other 100 ms is advanced in FakeChannel.await_result. + 59_900, + ) + # (sticky section: we already checked it only has 1 item and + # that item was the first above) + + def test_sticky_event_filtered_from_timeline_appears_in_sticky_section( + self, + ) -> None: + """ + Test that a sticky event which is excluded from the timeline by a timeline filter + still appears in the sticky section of the sync response. + + > Interaction with RoomFilter: The RoomFilter does not apply to the sticky.events section, + > as it is neither timeline nor state. However, the timeline filter MUST be applied before + > applying the deduplication logic above. 
In other words, if a sticky event would normally + > appear in both the timeline.events section and the sticky.events section, but is filtered + > out by the timeline filter, the sticky event MUST appear in sticky.events. + > — https://github.com/matrix-org/matrix-spec-proposals/blob/4340903c15e9eab1bfb2f6a31cfa08fd535f7e7c/proposals/4354-sticky-events.md#sync-api-changes + """ + # A filter that excludes message events + filter_json = json.dumps( + { + "room": { + # We only want these io.element.example events + "timeline": {"types": ["io.element.example"]}, + } + } + ) + + # Send a sticky message event (will be filtered by our filter) + sticky_event_id = self.helper.send_sticky_event( + self.room_id, + EventTypes.Message, + duration=Duration(minutes=1), + content={"body": "sticky message", "msgtype": "m.text"}, + tok=self.token, + )["event_id"] + + # Send a non-message event (will pass the filter) + nonmessage_event_id = self.helper.send_event( + room_id=self.room_id, + type="io.element.example", + content={"membership": "join"}, + tok=self.token, + )["event_id"] + + # Perform initial sync with our filter + channel = self.make_request( + "GET", + f"/sync?filter={quote(filter_json)}", + access_token=self.token, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Get timeline and sticky events from the sync response + timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][ + "events" + ] + sticky_events = channel.json_body["rooms"]["join"][self.room_id][ + "msc4354_sticky" + ]["events"] + + # Extract event IDs from the timeline + timeline_event_ids = [event["event_id"] for event in timeline_events] + + # The sticky message event should NOT be in the timeline (filtered out) + self.assertNotIn( + sticky_event_id, + timeline_event_ids, + f"Sticky message event {sticky_event_id} should be filtered from timeline", + ) + + # The member event should be in the timeline + self.assertIn( + nonmessage_event_id, + timeline_event_ids, + 
f"Non-message event {nonmessage_event_id} should be in timeline", + ) + + # The sticky message event MUST appear in the sticky section + # because it was filtered from the timeline + received_sticky_event_ids = [e["event_id"] for e in sticky_events] + self.assertEqual( + received_sticky_event_ids, + [sticky_event_id], + ) + + def test_ignored_users_sticky_events(self) -> None: + """ + Test that sticky events from ignored users are not delivered to clients. + + > As with normal events, sticky events sent by ignored users MUST NOT be + > delivered to clients. + > — https://github.com/matrix-org/matrix-spec-proposals/blob/4340903c15e9eab1bfb2f6a31cfa08fd535f7e7c/proposals/4354-sticky-events.md#sync-api-changes + """ + # Register a second user who will be ignored + user2_id = self.register_user("user2", "pass") + user2_token = self.login(user2_id, "pass") + + # Join user2 to the room + self.helper.join(self.room_id, user2_id, tok=user2_token) + + # User1 ignores user2 + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/user/{self.user_id}/account_data/m.ignored_user_list", + {"ignored_users": {user2_id: {}}}, + access_token=self.token, + ) + self.assertEqual(channel.code, 200, channel.result) + + # User2 sends a sticky event + sticky_response = self.helper.send_sticky_event( + self.room_id, + EventTypes.Message, + duration=Duration(minutes=1), + content={"body": "sticky from ignored user", "msgtype": "m.text"}, + tok=user2_token, + ) + sticky_event_id = sticky_response["event_id"] + + # User1 syncs + channel = self.make_request( + "GET", + "/sync", + access_token=self.token, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Check timeline events - should not include the sticky event from ignored user + timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][ + "events" + ] + timeline_event_ids = [event["event_id"] for event in timeline_events] + + self.assertNotIn( + sticky_event_id, + timeline_event_ids, + f"Sticky event 
from ignored user {sticky_event_id} should not be in timeline", + ) + + # Check sticky events section - should also not include the event + sticky_events = ( + channel.json_body["rooms"]["join"][self.room_id] + .get("msc4354_sticky", {}) + .get("events", []) + ) + + sticky_event_ids = [event["event_id"] for event in sticky_events] + + self.assertNotIn( + sticky_event_id, + sticky_event_ids, + f"Sticky event from ignored user {sticky_event_id} should not be in sticky section", + ) + + def test_history_visibility_bypass_for_sticky_events(self) -> None: + """ + Test that joined users can see sticky events even when history visibility + is set to "joined" and they joined after the event was sent. + This is required by MSC4354: "History visibility checks MUST NOT + be applied to sticky events. Any joined user is authorised to see + sticky events for the duration they remain sticky." + """ + # Create a new room with restricted history visibility + room_id = self.helper.create_room_as( + self.user_id, + tok=self.token, + extra_content={ + # Anyone can join + "preset": "public_chat", + # But you can't see history before you joined + "initial_state": [ + { + "type": EventTypes.RoomHistoryVisibility, + "state_key": "", + "content": {"history_visibility": "joined"}, + } + ], + }, + is_public=False, + ) + + # User1 sends a sticky event + sticky_event_id = self.helper.send_sticky_event( + room_id, + EventTypes.Message, + duration=Duration(minutes=5), + content={"body": "sticky message", "msgtype": "m.text"}, + tok=self.token, + )["event_id"] + + # User1 also sends a regular event, to verify our test setup + regular_event_id = self.helper.send( + room_id=room_id, + body="regular message", + tok=self.token, + )["event_id"] + + # Register and join a second user after the sticky event was sent + user2_id = self.register_user("user2", "pass") + user2_token = self.login(user2_id, "pass") + self.helper.join(room_id, user2_id, tok=user2_token) + + # User2 syncs - they should see the 
sticky event even though + # history visibility is "joined" and they joined after it was sent + channel = self.make_request( + "GET", + "/sync", + access_token=user2_token, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Get sticky events from the sync response + sticky_events = channel.json_body["rooms"]["join"][room_id]["msc4354_sticky"][ + "events" + ] + sticky_event_ids = [event["event_id"] for event in sticky_events] + + # The sticky event should be visible to user2 + self.assertEqual( + [sticky_event_id], + sticky_event_ids, + f"Sticky event {sticky_event_id} should be visible despite history visibility", + ) + + # Also check that the regular (non-sticky) event sent at the same time + # is NOT visible. This is to verify our test setup. + timeline_events = channel.json_body["rooms"]["join"][room_id]["timeline"][ + "events" + ] + timeline_event_ids = [event["event_id"] for event in timeline_events] + + # The regular message should NOT be visible (history visibility = joined) + self.assertNotIn( + regular_event_id, + timeline_event_ids, + f"Expecting to not see regular event ({regular_event_id}) before user2 joined.", + ) + + @patch.object(StickyEvent, "MAX_EVENTS_IN_SYNC", 3) + def test_pagination_with_many_sticky_events(self) -> None: + """ + Test that pagination works correctly when there are more sticky events than + the intended limit. + + The MSC doesn't define a limit or how to set one. + See thread: https://github.com/matrix-org/matrix-spec-proposals/pull/4354#discussion_r2885670008 + + But Synapse currently emits 100 at a time, controlled by `MAX_EVENTS_IN_SYNC`. + In this test we patch it to 3 (as sending 100 events is not very efficient). + """ + + # A filter that excludes message events + # This is needed so that the sticky events come down the sticky section + # and not the timeline section, which would hamper our test. 
+ filter_json = json.dumps( + { + "room": { + # We only want these io.element.example events + "timeline": {"types": ["io.element.example"]}, + } + } + ) + + # Send 8 sticky events: enough for 2 full pages and then a partial page with 2. + sent_sticky_event_ids = [] + for i in range(8): + response = self.helper.send_sticky_event( + self.room_id, + EventTypes.Message, + duration=Duration(minutes=1), + content={"body": f"sticky message {i}", "msgtype": "m.text"}, + tok=self.token, + ) + sent_sticky_event_ids.append(response["event_id"]) + + @dataclass + class SyncHelperResponse: + sticky_event_ids: list[str] + """Event IDs of events returned from the sticky section, in-order.""" + + next_batch: str + """Sync token to pass to `?since=` in order to do an incremental sync.""" + + def _do_sync(*, since: str | None) -> SyncHelperResponse: + """Small helper to do a sync and get the sticky events out.""" + and_since_param = "" if since is None else f"&since={since}" + channel = self.make_request( + "GET", + f"/sync?filter={quote(filter_json)}{and_since_param}", + access_token=self.token, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Get sticky events from the sync response + sticky_events = [] + # In this test, when no sticky events are in the response, + # we don't have a rooms section at all + if "rooms" in channel.json_body: + sticky_events = channel.json_body["rooms"]["join"][self.room_id][ + "msc4354_sticky" + ]["events"] + + return SyncHelperResponse( + sticky_event_ids=[ + sticky_event["event_id"] for sticky_event in sticky_events + ], + next_batch=channel.json_body["next_batch"], + ) + + # Perform initial sync, we should get the first 3 sticky events, + # in order. 
+ sync1 = _do_sync(since=None) + self.assertEqual(sync1.sticky_event_ids, sent_sticky_event_ids[0:3]) + + # Now do an incremental sync and expect the next page of 3 + sync2 = _do_sync(since=sync1.next_batch) + self.assertEqual(sync2.sticky_event_ids, sent_sticky_event_ids[3:6]) + + # Now do another incremental sync and expect the last 2 + sync3 = _do_sync(since=sync2.next_batch) + self.assertEqual(sync3.sticky_event_ids, sent_sticky_event_ids[6:8]) + + # Finally, expect an empty incremental sync at the end + sync4 = _do_sync(since=sync3.next_batch) + self.assertEqual(sync4.sticky_event_ids, []) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 0d319dff7e..1709b27f67 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -734,9 +734,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): self.assertTrue(args[1]) self.assertFalse(args[2]) - # Check that the profile update callback was called twice (once for the display - # name and once for the avatar URL), and that the "deactivation" boolean is true. - self.assertEqual(profile_mock.call_count, 2) + # Check that the profile update callback was called once + # and that the "deactivation" boolean is true. + self.assertEqual(profile_mock.call_count, 1) args = profile_mock.call_args[0] self.assertTrue(args[3]) diff --git a/tests/rest/synapse/mas/test_users.py b/tests/rest/synapse/mas/test_users.py index 4e8cf90700..f0f26a939c 100644 --- a/tests/rest/synapse/mas/test_users.py +++ b/tests/rest/synapse/mas/test_users.py @@ -10,11 +10,14 @@ # # See the GNU Affero General Public License for more details: # . 
- +from http import HTTPStatus from urllib.parse import urlencode +from parameterized import parameterized + from twisted.internet.testing import MemoryReactor +from synapse.api.errors import StoreError from synapse.appservice import ApplicationService from synapse.server import HomeServer from synapse.types import JsonDict, UserID, create_requester @@ -621,6 +624,78 @@ class MasDeleteUserResource(BaseTestCase): # And erased self.assertTrue(self.get_success(store.is_user_erased(user_id=str(alice)))) + @parameterized.expand([(False,), (True,)]) + def test_user_deactivation_removes_user_from_user_directory( + self, erase: bool + ) -> None: + """ + Test that when `/delete_user` deactivates a user, they get removed from the user directory. + + This is applicable regardless of whether the `erase` flag is set. + + Regression test for: + 'Users are not removed from user directory upon deactivation if erase flag is true' + https://github.com/element-hq/synapse/issues/19540 + """ + alice = UserID("alice", "test") + store = self.hs.get_datastores().main + + # Ensure we're testing what we think we are: + # check the user is IN the directory at the start of the test + self.assertEqual( + self.get_success(store._get_user_in_directory(alice.to_string())), + ("Alice", None), + ) + + channel = self.make_request( + "POST", + "/_synapse/mas/delete_user", + shorthand=False, + access_token=self.SHARED_SECRET, + content={"localpart": "alice", "erase": erase}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {}) + + self.assertIsNone( + self.get_success(store._get_user_in_directory(alice.to_string())), + "User still present in user directory after deactivation!", + ) + + def test_user_deactivation_removes_custom_profile_fields(self) -> None: + """ + Test that when `/delete_user` deactivates a user with the `erase` flag, + their custom profile fields get removed. 
+ """ + alice = UserID("alice", "test") + store = self.hs.get_datastores().main + + # Add custom profile field + self.get_success(store.set_profile_field(alice, "io.element.example", "hello")) + + # Ensure we're testing what we think we are: + # check the user has profile data at the start of the test + self.assertEqual( + self.get_success(store.get_profile_fields(alice)), + {"io.element.example": "hello"}, + ) + + channel = self.make_request( + "POST", + "/_synapse/mas/delete_user", + shorthand=False, + access_token=self.SHARED_SECRET, + content={"localpart": "alice", "erase": True}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {}) + + # With no profile, we expect a 404 (Not Found) StoreError + with self.assertRaises(StoreError) as raised: + self.get_success_or_raise(store.get_profile_fields(alice)) + + self.assertEqual(raised.exception.code, HTTPStatus.NOT_FOUND) + def test_delete_user_missing_localpart(self) -> None: channel = self.make_request( "POST",