Merge remote-tracking branch 'origin/develop' into neilj/improve_perceived_display_name_performance

This commit is contained in:
Neil Johnson
2026-04-14 09:39:27 +01:00
19 changed files with 254 additions and 355 deletions
+176
View File
@@ -0,0 +1,176 @@
# Re-usable workflow (https://docs.github.com/en/actions/how-tos/reuse-automations/reuse-workflows)
name: Reusable Complement testing

on:
  workflow_call:
    inputs:
      # When true, sets `TEST_ONLY_IGNORE_POETRY_LOCKFILE` for the Complement
      # runs below (i.e. test against latest dependency versions rather than
      # the poetry lockfile).
      use_latest_deps:
        type: boolean
        default: false
      # When true, runs the "Patch dependencies" step (swap the pinned Twisted
      # dependency for Twisted's trunk branch) and sets
      # `TEST_ONLY_SKIP_DEP_HASH_VERIFICATION` for the Complement runs below.
      use_twisted_trunk:
        type: boolean
        default: false

# Control the permissions granted to `GITHUB_TOKEN`.
permissions:
  # `actions/checkout` reads the repository (also see
  # https://github.com/actions/checkout/tree/de0fac2e4500dabe0009e67214ff5f5447ce83dd/#recommended-permissions)
  contents: read

env:
  RUST_VERSION: 1.87.0

jobs:
  complement:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      # Run every arrangement/database combination below to completion, even if
      # one of them fails (`fail-fast: false`).
      matrix:
        include:
          - arrangement: monolith
            database: SQLite
          - arrangement: monolith
            database: Postgres
          - arrangement: workers
            database: Postgres
    steps:
      - name: Checkout synapse codebase
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          path: synapse
      # Log Docker system info for debugging (compare with your local environment) and
      # tracking GitHub runner changes over time (can easily compare a run from last
      # week with the current one in question).
      - run: docker system info
        shell: bash
      - name: Install Rust
        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
        with:
          toolchain: ${{ env.RUST_VERSION }}
      - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1
      # We use `poetry` in `complement.sh`
      - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
        with:
          poetry-version: "2.2.1"
          # Matches the `path` where we checkout Synapse above
          working-directory: "synapse"
      # NOTE(review): the log-formatting steps below rely on `gotestfmt` being
      # on the PATH — presumably installed by this prerequisites script; confirm.
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
        with:
          cache-dependency-path: complement/go.sum
          go-version-file: complement/go.mod
      # This step is specific to the 'Twisted trunk' test run:
      - name: Patch dependencies
        if: ${{ inputs.use_twisted_trunk }}
        run: |
          set -x
          DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
          pipx install poetry==2.2.1
          poetry remove -n twisted
          poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry lock
        working-directory: synapse
      # Run the image sanity check test first as this is the first thing we want to know
      # about (are we actually testing what we expect?) and we don't want to debug
      # downstream failures (wild goose chase).
      - name: Sanity check Complement image
        id: run_sanity_check_complement_image_test
        # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
        # are underpowered and don't like running tons of Synapse instances at once.
        # -json: Output JSON format so that gotestfmt can parse it.
        #
        # tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
        # later on for better formatting with gotestfmt. But we still want the command
        # to output to the terminal as it runs so we can see what's happening in
        # real-time.
        run: |
          set -o pipefail
          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json -run 'TestSynapseVersion/Synapse_version_matches_current_git_checkout' 2>&1 | tee /tmp/gotest-sanity-check-complement.log
        shell: bash
        env:
          POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
          WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
      - name: Formatted sanity check Complement test logs
        # Always run this step if we attempted to run the Complement tests.
        if: always() && steps.run_sanity_check_complement_image_test.outcome != 'skipped'
        # We do not hide successful tests in `gotestfmt` here as the list of sanity
        # check tests is so short. Feel free to change this when we get more tests.
        #
        # Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
        # it derives several values under `$settings` and passes them to our
        # custom `.ci/complement_package.gotpl` template to render the output.
        run: cat /tmp/gotest-sanity-check-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
      - name: Run Complement Tests
        id: run_complement_tests
        # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
        # are underpowered and don't like running tons of Synapse instances at once.
        # -json: Output JSON format so that gotestfmt can parse it.
        #
        # tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
        # later on for better formatting with gotestfmt. But we still want the command
        # to output to the terminal as it runs so we can see what's happening in
        # real-time.
        run: |
          set -o pipefail
          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest-complement.log
        shell: bash
        env:
          POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
          WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
          TEST_ONLY_IGNORE_POETRY_LOCKFILE: ${{ inputs.use_latest_deps && 1 || '' }}
          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: ${{ inputs.use_twisted_trunk && 1 || '' }}
      - name: Formatted Complement test logs (only failing are shown)
        # Always run this step if we attempted to run the Complement tests.
        if: always() && steps.run_complement_tests.outcome != 'skipped'
        # Hide successful tests in order to reduce the verbosity of the otherwise very large output.
        #
        # Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
        # it derives several values under `$settings` and passes them to our
        # custom `.ci/complement_package.gotpl` template to render the output.
        run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages"
      - name: Run in-repo Complement Tests
        id: run_in_repo_complement_tests
        # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
        # are underpowered and don't like running tons of Synapse instances at once.
        # -json: Output JSON format so that gotestfmt can parse it.
        #
        # tee /tmp/gotest-in-repo-complement.log: We tee the output to a file so that we can re-process it
        # later on for better formatting with gotestfmt. But we still want the command
        # to output to the terminal as it runs so we can see what's happening in
        # real-time.
        run: |
          set -o pipefail
          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json 2>&1 | tee /tmp/gotest-in-repo-complement.log
        shell: bash
        env:
          POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
          WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
          TEST_ONLY_IGNORE_POETRY_LOCKFILE: ${{ inputs.use_latest_deps && 1 || '' }}
          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: ${{ inputs.use_twisted_trunk && 1 || '' }}
      - name: Formatted in-repo Complement test logs (only failing are shown)
        # Always run this step if we attempted to run the Complement tests.
        if: always() && steps.run_in_repo_complement_tests.outcome != 'skipped'
        # Hide successful tests in order to reduce the verbosity of the otherwise very large output.
        #
        # Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
        # it derives several values under `$settings` and passes them to our
        # custom `.ci/complement_package.gotpl` template to render the output.
        run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages"
+3 -76
View File
@@ -181,84 +181,11 @@ jobs:
/logs/**/*.log*
complement:
uses: ./.github/workflows/complement_tests.yml
needs: check_repo
if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Check out synapse codebase
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
- name: Run Complement Tests
id: run_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
TEST_ONLY_IGNORE_POETRY_LOCKFILE: 1
- name: Formatted Complement test logs
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_complement_tests.outcome != 'skipped'
run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
- name: Run in-repo Complement Tests
id: run_in_repo_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-in-repo-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json 2>&1 | tee /tmp/gotest-in-repo-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
TEST_ONLY_IGNORE_POETRY_LOCKFILE: 1
- name: Formatted in-repo Complement test logs
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_in_repo_complement_tests.outcome != 'skipped'
run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
with:
use_latest_deps: true
# Open an issue if the build fails, so we know about it.
# Only do this if we're not experimenting with this action in a PR.
+1 -135
View File
@@ -668,145 +668,11 @@ jobs:
schema_diff
complement:
uses: ./.github/workflows/complement_tests.yml
if: "${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true' }}"
needs:
- linting-done
- changes
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Checkout synapse codebase
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
path: synapse
# Log Docker system info for debugging (compare with your local environment) and
# tracking GitHub runner changes over time (can easily compare a run from last
# week with the current one in question).
- run: docker system info
shell: bash
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
- uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1
# We use `poetry` in `complement.sh`
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
poetry-version: "2.2.1"
# Matches the `path` where we checkout Synapse above
working-directory: "synapse"
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
# Run the image sanity check test first as this is the first thing we want to know
# about (are we actually testing what we expect?) and we don't want to debug
# downstream failures (wild goose chase).
- name: Sanity check Complement image
id: run_sanity_check_complement_image_test
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json -run 'TestSynapseVersion/Synapse_version_matches_current_git_checkout' 2>&1 | tee /tmp/gotest-sanity-check-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
- name: Formatted sanity check Complement test logs
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_sanity_check_complement_image_test.outcome != 'skipped'
# We do not hide successful tests in `gotestfmt` here as the list of sanity
# check tests is so short. Feel free to change this when we get more tests.
#
# Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
# it derives several values under `$settings` and passes them to our
# custom `.ci/complement_package.gotpl` template to render the output.
run: cat /tmp/gotest-sanity-check-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
- name: Run Complement Tests
id: run_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
- name: Formatted Complement test logs (only failing are shown)
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_complement_tests.outcome != 'skipped'
# Hide successful tests in order to reduce the verbosity of the otherwise very large output.
#
# Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
# it derives several values under `$settings` and passes them to our
# custom `.ci/complement_package.gotpl` template to render the output.
run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages"
- name: Run in-repo Complement Tests
id: run_in_repo_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-in-repo-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json 2>&1 | tee /tmp/gotest-in-repo-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
- name: Formatted in-repo Complement test logs (only failing are shown)
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_in_repo_complement_tests.outcome != 'skipped'
# Hide successful tests in order to reduce the verbosity of the otherwise very large output.
#
# Note that the `-hide` argument is interpreted by `gotestfmt`. From it,
# it derives several values under `$settings` and passes them to our
# custom `.ci/complement_package.gotpl` template to render the output.
run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,successful-tests,empty-packages"
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
+3 -88
View File
@@ -154,96 +154,11 @@ jobs:
/logs/**/*.log*
complement:
uses: ./.github/workflows/complement_tests.yml
needs: check_repo
if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==2.1.1
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock
working-directory: synapse
- name: Run Complement Tests
id: run_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: 1
- name: Formatted Complement test logs
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_complement_tests.outcome != 'skipped'
run: cat /tmp/gotest-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
- name: Run in-repo Complement Tests
id: run_in_repo_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
# are underpowered and don't like running tons of Synapse instances at once.
# -json: Output JSON format so that gotestfmt can parse it.
#
# tee /tmp/gotest-in-repo-complement.log: We tee the output to a file so that we can re-process it
# later on for better formatting with gotestfmt. But we still want the command
# to output to the terminal as it runs so we can see what's happening in
# real-time.
run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json 2>&1 | tee /tmp/gotest-in-repo-complement.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION: 1
- name: Formatted in-repo Complement test logs
# Always run this step if we attempted to run the Complement tests.
if: always() && steps.run_in_repo_complement_tests.outcome != 'skipped'
run: cat /tmp/gotest-in-repo-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
with:
use_twisted_trunk: true
# open an issue if the build fails, so we know about it.
open-issue:
+1
View File
@@ -0,0 +1 @@
Update CI to use a reusable Complement GitHub Actions workflow.
+1
View File
@@ -0,0 +1 @@
Reject `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload), as per the spec. This was temporarily allowed as a workaround for misbehaving clients.
+1
View File
@@ -0,0 +1 @@
Small simplifications to the events class.
+22 -22
View File
@@ -358,7 +358,7 @@ def check_state_dependent_auth_rules(
# a user is allowed to issue invites. Fixes
# https://github.com/vector-im/vector-web/issues/1208 hopefully
if event.type == EventTypes.ThirdPartyInvite:
user_level = get_user_power_level(event.user_id, auth_dict)
user_level = get_user_power_level(event.sender, auth_dict)
invite_level = get_named_level(auth_dict, "invite", 0)
if user_level < invite_level:
@@ -409,8 +409,8 @@ def _check_size_limits(event: "EventBase") -> None:
# Codepoint size check: Synapse always enforced these limits, so apply
# them strictly.
if len(event.user_id) > 255:
raise EventSizeError("'user_id' too large", unpersistable=True)
if len(event.sender) > 255:
raise EventSizeError("'sender' too large", unpersistable=True)
if len(event.room_id) > 255:
raise EventSizeError("'room_id' too large", unpersistable=True)
if event.is_state() and len(event.state_key) > 255:
@@ -423,8 +423,8 @@ def _check_size_limits(event: "EventBase") -> None:
strict_byte_limits = event.room_version.strict_event_byte_limits_room_versions
# Byte size check: if these fail, then be lenient to avoid breaking rooms.
if len(event.user_id.encode("utf-8")) > 255:
raise EventSizeError("'user_id' too large", unpersistable=strict_byte_limits)
if len(event.sender.encode("utf-8")) > 255:
raise EventSizeError("'sender' too large", unpersistable=strict_byte_limits)
if len(event.room_id.encode("utf-8")) > 255:
raise EventSizeError("'room_id' too large", unpersistable=strict_byte_limits)
if event.is_state() and len(event.state_key.encode("utf-8")) > 255:
@@ -544,7 +544,7 @@ def _is_membership_change_allowed(
raise AuthError(403, "This room has been marked as unfederatable.")
# get info about the caller
key = (EventTypes.Member, event.user_id)
key = (EventTypes.Member, event.sender)
caller = auth_events.get(key)
caller_in_room = caller and caller.membership == Membership.JOIN
@@ -569,7 +569,7 @@ def _is_membership_change_allowed(
else:
join_rule = JoinRules.INVITE
user_level = get_user_power_level(event.user_id, auth_events)
user_level = get_user_power_level(event.sender, auth_events)
target_level = get_user_power_level(target_user_id, auth_events)
invite_level = get_named_level(auth_events, "invite", 0)
@@ -587,7 +587,7 @@ def _is_membership_change_allowed(
"membership": membership,
"join_rule": join_rule,
"target_user_id": target_user_id,
"event.user_id": event.user_id,
"event.sender": event.sender,
},
)
@@ -607,14 +607,14 @@ def _is_membership_change_allowed(
if (
(caller_invited or caller_knocked)
and Membership.LEAVE == membership
and target_user_id == event.user_id
and target_user_id == event.sender
):
return
if not caller_in_room: # caller isn't joined
raise UnstableSpecAuthError(
403,
"%s not in room %s." % (event.user_id, event.room_id),
"%s not in room %s." % (event.sender, event.room_id),
errcode=Codes.NOT_JOINED,
)
@@ -645,7 +645,7 @@ def _is_membership_change_allowed(
# * They are already joined (it's a NOOP).
# * The room is public.
# * The room is restricted and the user meets the allows rules.
if event.user_id != target_user_id:
if event.sender != target_user_id:
raise AuthError(403, "Cannot force another user to join.")
elif target_banned:
raise AuthError(403, "You are banned from this room")
@@ -705,7 +705,7 @@ def _is_membership_change_allowed(
"You cannot unban user %s." % (target_user_id,),
errcode=Codes.INSUFFICIENT_POWER,
)
elif target_user_id != event.user_id:
elif target_user_id != event.sender:
kick_level = get_named_level(auth_events, "kick", 50)
if user_level < kick_level or user_level <= target_level:
@@ -733,7 +733,7 @@ def _is_membership_change_allowed(
or join_rule != JoinRules.KNOCK_RESTRICTED
):
raise AuthError(403, "You don't have permission to knock")
elif target_user_id != event.user_id:
elif target_user_id != event.sender:
raise AuthError(403, "You cannot knock for other users")
elif target_in_room:
raise UnstableSpecAuthError(
@@ -752,10 +752,10 @@ def _is_membership_change_allowed(
def _check_event_sender_in_room(
event: "EventBase", auth_events: StateMap["EventBase"]
) -> None:
key = (EventTypes.Member, event.user_id)
key = (EventTypes.Member, event.sender)
member_event = auth_events.get(key)
_check_joined_room(member_event, event.user_id, event.room_id)
_check_joined_room(member_event, event.sender, event.room_id)
def _check_joined_room(
@@ -809,7 +809,7 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
power_levels_event = get_power_level_event(auth_events)
send_level = get_send_level(event.type, state_key, power_levels_event)
user_level = get_user_power_level(event.user_id, auth_events)
user_level = get_user_power_level(event.sender, auth_events)
if user_level < send_level:
raise UnstableSpecAuthError(
@@ -822,7 +822,7 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
if (
state_key is not None
and state_key.startswith("@")
and state_key != event.user_id
and state_key != event.sender
):
if event.room_version.msc3757_enabled:
try:
@@ -841,7 +841,7 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
)
if (
# sender is owner of the state key
state_key_user_id == event.user_id
state_key_user_id == event.sender
# sender has higher PL than the owner of the state key
or user_level > get_user_power_level(state_key_user_id, auth_events)
):
@@ -868,7 +868,7 @@ def check_redaction(
AuthError if the event sender is definitely not allowed to redact
the target event.
"""
user_level = get_user_power_level(event.user_id, auth_events)
user_level = get_user_power_level(event.sender, auth_events)
redact_level = get_named_level(auth_events, "redact", 50)
@@ -966,7 +966,7 @@ def _check_power_levels(
if not current_state:
return
user_level = get_user_power_level(event.user_id, auth_events)
user_level = get_user_power_level(event.sender, auth_events)
# Check other levels:
levels_to_check: list[tuple[str, str | None]] = [
@@ -1020,7 +1020,7 @@ def _check_power_levels(
if new_level == old_level:
continue
if dir == "users" and level_to_check != event.user_id:
if dir == "users" and level_to_check != event.sender:
if old_level == user_level:
raise AuthError(
403,
@@ -1137,7 +1137,7 @@ def _verify_third_party_invite(
if invite_event.sender != event.sender:
return False
if event.user_id != invite_event.user_id:
if event.sender != invite_event.sender:
return False
if signed["mxid"] != event.state_key:
-4
View File
@@ -221,7 +221,6 @@ class EventBase(metaclass=abc.ABCMeta):
# get_state_key() (and a check for None).
state_key: DictProperty[str] = DictProperty("state_key")
type: DictProperty[str] = DictProperty("type")
user_id: DictProperty[str] = DictProperty("sender")
@property
def event_id(self) -> str:
@@ -288,9 +287,6 @@ class EventBase(metaclass=abc.ABCMeta):
return template_json
def __getitem__(self, field: str) -> Any | None:
return self._dict[field]
def __contains__(self, field: str) -> bool:
return field in self._dict
+1 -1
View File
@@ -1190,7 +1190,7 @@ class FederationHandler:
# We should assert some things.
# FIXME: Do this in a nicer way
assert event.type == EventTypes.Member
assert event.user_id == user_id
assert event.sender == user_id
assert event.state_key == user_id
assert event.room_id == room_id
return origin, event, room_version
+1 -3
View File
@@ -456,9 +456,7 @@ class InitialSyncHandler:
if not self.hs.config.server.presence_enabled:
return []
states = await presence_handler.get_states(
[m.user_id for m in room_members]
)
states = await presence_handler.get_states([m.sender for m in room_members])
return [
{
+3 -3
View File
@@ -895,7 +895,7 @@ class EventCreationHandler:
if not prev_event:
return None
if prev_event and event.user_id == prev_event.user_id:
if prev_event and event.sender == prev_event.sender:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
@@ -1541,7 +1541,7 @@ class EventCreationHandler:
EventTypes.Message,
EventTypes.Encrypted,
]:
await self.store.set_room_participation(event.user_id, event.room_id)
await self.store.set_room_participation(event.sender, event.room_id)
if event.internal_metadata.is_out_of_band_membership():
# the only sort of out-of-band-membership events we expect to see here are
@@ -2098,7 +2098,7 @@ class EventCreationHandler:
"Could not find event %s" % (event.redacts,)
)
if event.user_id != original_event.user_id:
if event.sender != original_event.sender:
raise AuthError(
403, "You don't have permission to redact events"
)
+1 -1
View File
@@ -501,7 +501,7 @@ class HttpPusher(Pusher):
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"sender": event.user_id,
"sender": event.sender,
"prio": priority,
}
if not self.disable_badge_count:
+20 -3
View File
@@ -159,6 +159,23 @@ class KeyUploadServlet(RestServlet):
device_keys: DeviceKeys | None = None
"""Identity keys for the device. May be absent if no new identity keys are required."""
@field_validator("device_keys", mode="before")
@classmethod
def validate_device_keys_not_null(cls, v: Any) -> Any:
"""Reject explicit `null` for `device_keys` while still allowing
the field to be omitted (in which case the default `None` is used).
The spec says `device_keys` may be omitted, but when present it
must be a `DeviceKeys` object not `null`.
Pydantic's experimental `Missing` sentinel would be a cleaner way
to express this, but it's not stable yet:
https://docs.pydantic.dev/latest/concepts/experimental/#missing-sentinel
"""
if v is None:
raise ValueError("device_keys must not be null")
return v
fallback_keys: Mapping[StrictStr, StrictStr | KeyObject] | None = None
"""
The public key which should be used if the device's one-time keys are
@@ -241,7 +258,7 @@ class KeyUploadServlet(RestServlet):
400, "To upload keys, you must pass device_id when authenticating"
)
if "device_keys" in body and isinstance(body["device_keys"], dict):
if "device_keys" in body:
# Validate the provided `user_id` and `device_id` fields in
# `device_keys` match that of the requesting user. We can't do
# this directly in the pydantic model as we don't have access
@@ -249,13 +266,13 @@ class KeyUploadServlet(RestServlet):
#
# TODO: We could use ValidationInfo when we switch to Pydantic v2.
# https://docs.pydantic.dev/latest/concepts/validators/#validation-info
if body["device_keys"].get("user_id") != user_id:
if body["device_keys"]["user_id"] != user_id:
raise SynapseError(
code=HTTPStatus.BAD_REQUEST,
errcode=Codes.BAD_JSON,
msg="Provided `user_id` in `device_keys` does not match that of the authenticated user",
)
if body["device_keys"].get("device_id") != device_id:
if body["device_keys"]["device_id"] != device_id:
raise SynapseError(
code=HTTPStatus.BAD_REQUEST,
errcode=Codes.BAD_JSON,
+1 -1
View File
@@ -3028,7 +3028,7 @@ class PersistEventsStore:
event.event_id,
event.internal_metadata.stream_ordering,
event.state_key,
event.user_id,
event.sender,
event.room_id,
event.membership,
non_null_str_or_none(event.content.get("displayname")),
+4 -4
View File
@@ -568,8 +568,8 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
# notify us because the interesting user is joined to the room where the
# message was sent.
self.assertEqual(service, interested_appservice)
self.assertEqual(events[0]["type"], "m.room.message")
self.assertEqual(events[0]["sender"], alice)
self.assertEqual(events[0].type, "m.room.message")
self.assertEqual(events[0].sender, alice)
else:
self.send_mock.assert_not_called()
@@ -628,8 +628,8 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
# Events sent from an interesting local user should also be picked up as
# interesting to the appservice.
self.assertEqual(service, interested_appservice)
self.assertEqual(events[0]["type"], "m.room.message")
self.assertEqual(events[0]["sender"], alice)
self.assertEqual(events[0].type, "m.room.message")
self.assertEqual(events[0].sender, alice)
def test_sending_read_receipt_batches_to_application_services(self) -> None:
"""Tests that a large batch of read receipts are sent correctly to
+3 -3
View File
@@ -19,7 +19,7 @@
#
#
import logging
from typing import Collection, cast
from typing import Collection
from unittest import TestCase
from unittest.mock import AsyncMock, Mock, patch
@@ -140,7 +140,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
"content": {},
"room_id": room_id,
"sender": "@yetanotheruser:" + OTHER_SERVER,
"depth": cast(int, join_event["depth"]) + 1,
"depth": join_event.depth + 1,
"prev_events": [join_event.event_id],
"auth_events": [],
"origin_server_ts": self.clock.time_msec(),
@@ -192,7 +192,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
"content": {},
"room_id": room_id,
"sender": "@yetanotheruser:" + OTHER_SERVER,
"depth": cast(int, join_event["depth"]) + 1,
"depth": join_event.depth + 1,
"prev_events": [join_event.event_id],
"auth_events": [],
"origin_server_ts": self.clock.time_msec(),
+6 -5
View File
@@ -160,9 +160,12 @@ class KeyUploadTestCase(unittest.HomeserverTestCase):
channel.result,
)
def test_upload_keys_succeeds_when_fields_are_explicitly_set_to_null(self) -> None:
def test_upload_keys_rejects_device_keys_set_to_null(self) -> None:
"""
This is a regression test for https://github.com/element-hq/synapse/pull/19023.
Test that sending `device_keys: null` is rejected per spec.
The spec says `device_keys` may be omitted, but not set to `null`.
See https://github.com/element-hq/synapse/issues/19030.
"""
device_id = "DEVICE_ID"
self.register_user("alice", "wonderland")
@@ -173,12 +176,10 @@ class KeyUploadTestCase(unittest.HomeserverTestCase):
"/_matrix/client/v3/keys/upload",
{
"device_keys": None,
"one_time_keys": None,
"fallback_keys": None,
},
alice_token,
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
class KeyQueryTestCase(unittest.HomeserverTestCase):
+6 -6
View File
@@ -152,7 +152,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"sender": self.u_alice.to_string(),
"content": {"body": "t", "msgtype": "message"},
},
event,
@@ -173,7 +173,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"sender": self.u_alice.to_string(),
"content": {},
},
event,
@@ -191,7 +191,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Member,
"user_id": self.u_bob.to_string(),
"sender": self.u_bob.to_string(),
"content": {"membership": Membership.JOIN, "blue": "red"},
},
event,
@@ -212,7 +212,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Member,
"user_id": self.u_bob.to_string(),
"sender": self.u_bob.to_string(),
"content": {"membership": Membership.JOIN},
},
event,
@@ -328,7 +328,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"sender": self.u_alice.to_string(),
"content": {"body": "t", "msgtype": "message"},
},
event,
@@ -347,7 +347,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"sender": self.u_alice.to_string(),
"content": {},
},
event,