mirror of
https://forgejo.ellis.link/continuwuation/continuwuity/
synced 2026-04-01 20:26:18 +00:00
Compare commits
107 Commits
jade/mirro
...
nex/fix/va
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4f198fb4ef | ||
|
|
1631c0afa4 | ||
|
|
862684af28 | ||
|
|
7345c241a9 | ||
|
|
6a8b988b36 | ||
|
|
f1d6536793 | ||
|
|
cf8d8e4ea6 | ||
|
|
393d341f07 | ||
|
|
ba55dffa0e | ||
|
|
f3115e14ab | ||
|
|
b3fa4705ef | ||
|
|
53b06a7918 | ||
|
|
fafc1d3fd1 | ||
|
|
dbc74272c3 | ||
|
|
f11caac05e | ||
|
|
e581face44 | ||
|
|
037ba41adb | ||
|
|
941c8f7d52 | ||
|
|
7dae118af9 | ||
|
|
07dfc5528d | ||
|
|
3f4749a796 | ||
|
|
be8d72fafc | ||
|
|
0008709481 | ||
|
|
ee51d4357f | ||
|
|
8ffc6d4f15 | ||
|
|
93efe89a1f | ||
|
|
16f37d21ff | ||
|
|
800ac8d1f1 | ||
|
|
872f5bf077 | ||
|
|
992217d644 | ||
|
|
4fb4397a9f | ||
|
|
61b6947e88 | ||
|
|
876d3faec4 | ||
|
|
9cc0cc69f7 | ||
|
|
5513bb4dff | ||
|
|
693e327004 | ||
|
|
3e6571a2b8 | ||
|
|
f0f10f8f3e | ||
|
|
a4f2b55a8a | ||
|
|
213a361c53 | ||
|
|
1c21e4af6e | ||
|
|
fceaaedc04 | ||
|
|
0eff173c0b | ||
|
|
72bf8e5927 | ||
|
|
3491f653a5 | ||
|
|
e820dd7aed | ||
|
|
c92b7239a8 | ||
|
|
2940bc69c1 | ||
|
|
502919b248 | ||
|
|
33c3d23d60 | ||
|
|
ce318fe455 | ||
|
|
a729e1d63d | ||
|
|
956c3dfa62 | ||
|
|
49e8f06559 | ||
|
|
c0f4424cb9 | ||
|
|
3eac985c5e | ||
|
|
5fd341096d | ||
|
|
a1b2d6ec46 | ||
|
|
551563ce83 | ||
|
|
9f133cf75b | ||
|
|
23c398dc1e | ||
|
|
fa73893179 | ||
|
|
57fec44ec7 | ||
|
|
bc8d304dbf | ||
|
|
7f4248a8c6 | ||
|
|
430200b60e | ||
|
|
a573f1f502 | ||
|
|
3bf3c24d22 | ||
|
|
c1dc336c65 | ||
|
|
be3c6ebb58 | ||
|
|
bdf31fa92e | ||
|
|
1372f74812 | ||
|
|
5935d99af0 | ||
|
|
6b11a65545 | ||
|
|
5ea1206739 | ||
|
|
d45c5a9f47 | ||
|
|
9a1039b215 | ||
|
|
59f9b8bdb8 | ||
|
|
3b76e9876d | ||
|
|
64c059f82e | ||
|
|
76b90acea6 | ||
|
|
5e0334088a | ||
|
|
eecc472258 | ||
|
|
4a9bea5764 | ||
|
|
08fd87c7de | ||
|
|
ac6d639660 | ||
|
|
0958660eb5 | ||
|
|
57c3290f02 | ||
|
|
6794ea565f | ||
|
|
38080275d4 | ||
|
|
1138218878 | ||
|
|
c0f1d8eab6 | ||
|
|
192f78887a | ||
|
|
def8816c02 | ||
|
|
9e73146b19 | ||
|
|
19d792e4eb | ||
|
|
2a977f019f | ||
|
|
76ea4dfa29 | ||
|
|
2ec771c84d | ||
|
|
9375e81974 | ||
|
|
f22f35d27b | ||
|
|
d5c7d80709 | ||
|
|
1899d8bb00 | ||
|
|
9a5ba6171f | ||
|
|
da3efa05b5 | ||
|
|
b53ba2eef4 | ||
|
|
33019c4529 |
@@ -17,9 +17,9 @@ inputs:
|
||||
required: false
|
||||
default: ''
|
||||
rust-version:
|
||||
description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
|
||||
description: 'Rust version to install (e.g. nightly). Defaults to the version specified in rust-toolchain.toml'
|
||||
required: false
|
||||
default: '1.87.0'
|
||||
default: ''
|
||||
sccache-cache-limit:
|
||||
description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
|
||||
required: false
|
||||
@@ -59,9 +59,20 @@ runs:
|
||||
mkdir -p "${{ github.workspace }}/target"
|
||||
mkdir -p "${{ github.workspace }}/.rustup"
|
||||
|
||||
- name: Start cache restore group
|
||||
- name: Start registry/toolchain restore group
|
||||
shell: bash
|
||||
run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
|
||||
run: echo "::group::📦 Restoring registry and toolchain caches"
|
||||
|
||||
- name: Cache toolchain binaries
|
||||
id: toolchain-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
.cargo/bin
|
||||
.rustup/toolchains
|
||||
.rustup/update-hashes
|
||||
# Shared toolchain cache across all Rust versions
|
||||
key: continuwuity-toolchain-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}
|
||||
|
||||
- name: Cache Cargo registry and git
|
||||
id: registry-cache
|
||||
@@ -77,58 +88,13 @@ runs:
|
||||
restore-keys: |
|
||||
continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-
|
||||
|
||||
- name: Cache toolchain binaries
|
||||
id: toolchain-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
.cargo/bin
|
||||
.rustup/toolchains
|
||||
.rustup/update-hashes
|
||||
# Shared toolchain cache across all Rust versions
|
||||
key: continuwuity-toolchain-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}
|
||||
|
||||
|
||||
- name: Setup sccache
|
||||
uses: https://git.tomfos.tr/tom/sccache-action@v1
|
||||
|
||||
- name: Cache dependencies
|
||||
id: deps-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
target/**/.fingerprint
|
||||
target/**/deps
|
||||
target/**/*.d
|
||||
target/**/.cargo-lock
|
||||
target/**/CACHEDIR.TAG
|
||||
target/**/.rustc_info.json
|
||||
/timelord/
|
||||
# Dependencies cache - based on Cargo.lock, survives source code changes
|
||||
key: >-
|
||||
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
|
||||
|
||||
- name: Cache incremental compilation
|
||||
id: incremental-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
target/**/incremental
|
||||
# Incremental cache - based on source code changes
|
||||
key: >-
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
|
||||
restore-keys: |
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
|
||||
|
||||
- name: End cache restore group
|
||||
- name: End registry/toolchain restore group
|
||||
shell: bash
|
||||
run: echo "::endgroup::"
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
shell: bash
|
||||
id: rust-setup
|
||||
run: |
|
||||
# Install rustup if not already cached
|
||||
if ! command -v rustup &> /dev/null; then
|
||||
@@ -156,8 +122,68 @@ runs:
|
||||
echo "::group::📦 Setting up Rust from rust-toolchain.toml"
|
||||
rustup show
|
||||
fi
|
||||
|
||||
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
|
||||
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Install Rust components
|
||||
if: inputs.rust-components != ''
|
||||
shell: bash
|
||||
run: |
|
||||
echo "📦 Installing components: ${{ inputs.rust-components }}"
|
||||
rustup component add ${{ inputs.rust-components }}
|
||||
|
||||
- name: Install Rust target
|
||||
if: inputs.rust-target != ''
|
||||
shell: bash
|
||||
run: |
|
||||
echo "📦 Installing target: ${{ inputs.rust-target }}"
|
||||
rustup target add ${{ inputs.rust-target }}
|
||||
|
||||
- name: Start build cache restore group
|
||||
shell: bash
|
||||
run: echo "::group::📦 Restoring build cache"
|
||||
|
||||
- name: Setup sccache
|
||||
uses: https://git.tomfos.tr/tom/sccache-action@v1
|
||||
|
||||
- name: Cache dependencies
|
||||
id: deps-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
target/**/.fingerprint
|
||||
target/**/deps
|
||||
target/**/*.d
|
||||
target/**/.cargo-lock
|
||||
target/**/CACHEDIR.TAG
|
||||
target/**/.rustc_info.json
|
||||
/timelord/
|
||||
# Dependencies cache - based on Cargo.lock, survives source code changes
|
||||
key: >-
|
||||
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
|
||||
|
||||
- name: Cache incremental compilation
|
||||
id: incremental-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
target/**/incremental
|
||||
# Incremental cache - based on source code changes
|
||||
key: >-
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
|
||||
restore-keys: |
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
|
||||
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
|
||||
|
||||
- name: End build cache restore group
|
||||
shell: bash
|
||||
run: echo "::endgroup::"
|
||||
|
||||
- name: Configure PATH and install tools
|
||||
shell: bash
|
||||
env:
|
||||
@@ -211,27 +237,9 @@ runs:
|
||||
echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Install Rust components
|
||||
if: inputs.rust-components != ''
|
||||
shell: bash
|
||||
run: |
|
||||
echo "📦 Installing components: ${{ inputs.rust-components }}"
|
||||
rustup component add ${{ inputs.rust-components }}
|
||||
|
||||
- name: Install Rust target
|
||||
if: inputs.rust-target != ''
|
||||
shell: bash
|
||||
run: |
|
||||
echo "📦 Installing target: ${{ inputs.rust-target }}"
|
||||
rustup target add ${{ inputs.rust-target }}
|
||||
|
||||
- name: Output version and summary
|
||||
id: rust-setup
|
||||
shell: bash
|
||||
run: |
|
||||
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
|
||||
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📋 Setup complete:"
|
||||
echo " Rust: $(rustc --version)"
|
||||
echo " Cargo: $(cargo --version)"
|
||||
|
||||
@@ -54,7 +54,7 @@ runs:
|
||||
run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
|
||||
|
||||
- name: Upload binary artifact
|
||||
uses: forgejo/upload-artifact@v4
|
||||
uses: forgejo/upload-artifact@v3
|
||||
with:
|
||||
name: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
|
||||
path: /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
|
||||
@@ -62,7 +62,7 @@ runs:
|
||||
|
||||
- name: Upload digest
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: forgejo/upload-artifact@v4
|
||||
uses: forgejo/upload-artifact@v3
|
||||
with:
|
||||
name: digests${{ inputs.digest_suffix }}-${{ inputs.slug }}${{ inputs.cpu_suffix }}
|
||||
path: /tmp/digests/*
|
||||
|
||||
@@ -46,6 +46,9 @@ creds:
|
||||
- registry: ghcr.io
|
||||
user: "{{env \"GH_PACKAGES_USER\"}}"
|
||||
pass: "{{env \"GH_PACKAGES_TOKEN\"}}"
|
||||
- registry: docker.io
|
||||
user: "{{env \"DOCKER_MIRROR_USER\"}}"
|
||||
pass: "{{env \"DOCKER_MIRROR_TOKEN\"}}"
|
||||
|
||||
# Global defaults
|
||||
defaults:
|
||||
@@ -67,3 +70,7 @@ sync:
|
||||
target: ghcr.io/continuwuity/continuwuity
|
||||
type: repository
|
||||
<<: *tags-main
|
||||
- source: *source
|
||||
target: docker.io/jadedblueeyes/continuwuity
|
||||
type: repository
|
||||
<<: *tags-main
|
||||
|
||||
@@ -32,12 +32,12 @@ jobs:
|
||||
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
|
||||
|
||||
- name: Checkout repository with full history
|
||||
uses: https://code.forgejo.org/actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Cache Cargo registry
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
@@ -126,7 +126,7 @@ jobs:
|
||||
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
|
||||
|
||||
- name: Upload deb artifact
|
||||
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: continuwuity-${{ steps.debian-version.outputs.distribution }}
|
||||
path: ${{ steps.cargo-deb.outputs.path }}
|
||||
|
||||
@@ -30,13 +30,13 @@ jobs:
|
||||
echo "Fedora version: $VERSION"
|
||||
|
||||
- name: Checkout repository with full history
|
||||
uses: https://code.forgejo.org/actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
- name: Cache DNF packages
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
/var/cache/dnf
|
||||
@@ -46,7 +46,7 @@ jobs:
|
||||
dnf-fedora${{ steps.fedora.outputs.version }}-
|
||||
|
||||
- name: Cache Cargo registry
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
@@ -56,7 +56,7 @@ jobs:
|
||||
cargo-fedora${{ steps.fedora.outputs.version }}-
|
||||
|
||||
- name: Cache Rust build dependencies
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/rpmbuild/BUILD/*/target/release/deps
|
||||
@@ -238,13 +238,13 @@ jobs:
|
||||
cp $BIN_RPM upload-bin/
|
||||
|
||||
- name: Upload binary RPM
|
||||
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: continuwuity
|
||||
path: upload-bin/
|
||||
|
||||
- name: Upload debug RPM artifact
|
||||
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: continuwuity-debug
|
||||
path: artifacts/*debuginfo*.rpm
|
||||
|
||||
@@ -21,34 +21,11 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Sync repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup mdBook
|
||||
uses: https://github.com/peaceiris/actions-mdbook@v2
|
||||
with:
|
||||
mdbook-version: "latest"
|
||||
|
||||
- name: Build mdbook
|
||||
run: mdbook build
|
||||
|
||||
- name: Prepare static files for deployment
|
||||
run: |
|
||||
mkdir -p ./public/.well-known/matrix
|
||||
mkdir -p ./public/.well-known/continuwuity
|
||||
mkdir -p ./public/schema
|
||||
# Copy the Matrix .well-known files
|
||||
cp ./docs/static/server ./public/.well-known/matrix/server
|
||||
cp ./docs/static/client ./public/.well-known/matrix/client
|
||||
cp ./docs/static/client ./public/.well-known/matrix/support
|
||||
cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements
|
||||
cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json
|
||||
# Copy the custom headers file
|
||||
cp ./docs/static/_headers ./public/_headers
|
||||
echo "Copied .well-known files and _headers to ./public"
|
||||
|
||||
- name: Detect runner environment
|
||||
id: runner-env
|
||||
uses: https://git.tomfos.tr/actions/detect-versions@v1
|
||||
@@ -63,9 +40,18 @@ jobs:
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.npm
|
||||
key: continuwuity-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}
|
||||
key: continuwuity-rspress-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}-${{ hashFiles('package-lock.json') }}
|
||||
restore-keys: |
|
||||
continuwuity-rspress-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}-
|
||||
continuwuity-rspress-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build Rspress documentation
|
||||
run: npm run docs:build
|
||||
|
||||
- name: Install Wrangler
|
||||
run: npm install --save-dev wrangler@latest
|
||||
|
||||
- name: Deploy to Cloudflare Pages (Production)
|
||||
@@ -74,7 +60,7 @@ jobs:
|
||||
with:
|
||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
||||
command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
||||
command: pages deploy ./doc_build --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
||||
|
||||
- name: Deploy to Cloudflare Pages (Preview)
|
||||
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
||||
@@ -82,4 +68,4 @@ jobs:
|
||||
with:
|
||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
||||
command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
||||
command: pages deploy ./doc_build --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
||||
|
||||
@@ -109,7 +109,7 @@ jobs:
|
||||
cat ./element-web/webapp/config.json
|
||||
|
||||
- name: 📤 Upload Artifact
|
||||
uses: forgejo/upload-artifact@v4
|
||||
uses: forgejo/upload-artifact@v3
|
||||
with:
|
||||
name: element-web
|
||||
path: ./element-web/webapp/
|
||||
|
||||
@@ -34,9 +34,11 @@ jobs:
|
||||
N7574_GIT_TOKEN: ${{ secrets.N7574_GIT_TOKEN }}
|
||||
GH_PACKAGES_USER: ${{ vars.GH_PACKAGES_USER }}
|
||||
GH_PACKAGES_TOKEN: ${{ secrets.GH_PACKAGES_TOKEN }}
|
||||
DOCKER_MIRROR_USER: ${{ vars.DOCKER_MIRROR_USER }}
|
||||
DOCKER_MIRROR_TOKEN: ${{ secrets.DOCKER_MIRROR_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -47,7 +47,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -3,15 +3,6 @@ concurrency:
|
||||
group: "release-image-${{ github.ref }}"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "*.md"
|
||||
- "**/*.md"
|
||||
- ".gitlab-ci.yml"
|
||||
- ".gitignore"
|
||||
- "renovate.json"
|
||||
- "pkg/**"
|
||||
- "docs/**"
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
@@ -52,7 +43,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Prepare Docker build environment
|
||||
@@ -106,7 +97,7 @@ jobs:
|
||||
needs: build-release
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Create multi-platform manifest
|
||||
@@ -139,7 +130,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Prepare max-perf Docker build environment
|
||||
@@ -193,7 +184,7 @@ jobs:
|
||||
needs: build-maxperf
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Create max-perf manifest
|
||||
|
||||
@@ -43,11 +43,11 @@ jobs:
|
||||
name: Renovate
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ghcr.io/renovatebot/renovate:41.146.4@sha256:bb70194b7405faf10a6f279b60caa10403a440ba37d158c5a4ef0ae7b67a0f92
|
||||
image: ghcr.io/renovatebot/renovate:42.11.0@sha256:656c1e5b808279eac16c37b89562fb4c699e02fc7e219244f4a1fc2f0a7ce367
|
||||
options: --tmpfs /tmp:exec
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
|
||||
@@ -14,13 +14,14 @@ jobs:
|
||||
update-flake-hashes:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: false
|
||||
fetch-single-branch: true
|
||||
submodules: false
|
||||
persist-credentials: false
|
||||
persist-credentials: true
|
||||
token: ${{ secrets.FORGEJO_TOKEN }}
|
||||
|
||||
- uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
|
||||
with:
|
||||
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -79,7 +79,7 @@ test-conduit.toml
|
||||
/.gitlab-ci.d
|
||||
|
||||
# mdbook output
|
||||
public/
|
||||
/public/
|
||||
|
||||
# macOS
|
||||
.DS_Store
|
||||
@@ -95,3 +95,10 @@ rustc-ice-*
|
||||
|
||||
# complement test logs are huge
|
||||
tests/test_results/complement/test_logs.jsonl
|
||||
|
||||
# Node
|
||||
node_modules/
|
||||
|
||||
# Rspress
|
||||
doc_build/
|
||||
.rspress/
|
||||
|
||||
@@ -23,7 +23,7 @@ repos:
|
||||
- id: check-added-large-files
|
||||
|
||||
- repo: https://github.com/crate-ci/typos
|
||||
rev: v1.39.0
|
||||
rev: v1.40.0
|
||||
hooks:
|
||||
- id: typos
|
||||
- id: typos
|
||||
@@ -31,7 +31,7 @@ repos:
|
||||
stages: [commit-msg]
|
||||
|
||||
- repo: https://github.com/crate-ci/committed
|
||||
rev: v1.1.7
|
||||
rev: v1.1.8
|
||||
hooks:
|
||||
- id: committed
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Contributing guide
|
||||
|
||||
This page is about contributing to Continuwuity. The
|
||||
[development](./development.md) and [code style guide](./development/code_style.md) pages may be of interest for you as well.
|
||||
[development](/development/index.mdx) and [code style guide](/development/code_style.mdx) pages may be of interest for you as well.
|
||||
|
||||
If you would like to work on an [issue][issues] that is not assigned, preferably
|
||||
ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
|
||||
@@ -9,7 +9,7 @@ # Contributing guide
|
||||
|
||||
### Code Style
|
||||
|
||||
Please review and follow the [code style guide](./development/code_style.md) for formatting, linting, naming conventions, and other code standards.
|
||||
Please review and follow the [code style guide](/development/code_style.mdx) for formatting, linting, naming conventions, and other code standards.
|
||||
|
||||
### Pre-commit Checks
|
||||
|
||||
@@ -150,7 +150,7 @@ ### Creating pull requests
|
||||
|
||||
Before submitting a pull request, please ensure:
|
||||
1. Your code passes all CI checks (formatting, linting, typo detection, etc.)
|
||||
2. Your code follows the [code style guide](./development/code_style.md)
|
||||
2. Your code follows the [code style guide](/development/code_style.md)
|
||||
3. Your commit messages follow the conventional commits format
|
||||
4. Tests are added for new functionality
|
||||
5. Documentation is updated if needed
|
||||
|
||||
327
Cargo.lock
generated
327
Cargo.lock
generated
@@ -17,6 +17,19 @@ version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.8.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"getrandom 0.3.4",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "1.1.4"
|
||||
@@ -26,6 +39,15 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aligned"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "377e4c0ba83e4431b10df45c1d4666f178ea9c552cac93e60c3a88bf32785923"
|
||||
dependencies = [
|
||||
"as-slice",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aligned-vec"
|
||||
version = "0.6.4"
|
||||
@@ -156,6 +178,15 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "as-slice"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "516b6b4f0e40d50dcda9365d53964ec74560ad4284da2e7fc97122cd83174516"
|
||||
dependencies = [
|
||||
"stable_deref_trait",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "as_variant"
|
||||
version = "1.3.0"
|
||||
@@ -305,6 +336,26 @@ version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
||||
|
||||
[[package]]
|
||||
name = "av-scenechange"
|
||||
version = "0.14.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f321d77c20e19b92c39e7471cf986812cbb46659d2af674adc4331ef3f18394"
|
||||
dependencies = [
|
||||
"aligned",
|
||||
"anyhow",
|
||||
"arg_enum_proc_macro",
|
||||
"arrayvec",
|
||||
"log",
|
||||
"num-rational",
|
||||
"num-traits",
|
||||
"pastey",
|
||||
"rayon",
|
||||
"thiserror 2.0.17",
|
||||
"v_frame",
|
||||
"y4m",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "av1-grain"
|
||||
version = "0.2.4"
|
||||
@@ -484,9 +535,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "axum-server"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab"
|
||||
checksum = "c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"bytes",
|
||||
@@ -599,9 +650,12 @@ checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
|
||||
|
||||
[[package]]
|
||||
name = "bitstream-io"
|
||||
version = "2.6.0"
|
||||
version = "4.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2"
|
||||
checksum = "60d4bd9d1db2c6bdf285e223a7fa369d5ce98ec767dec949c6ca62863ce61757"
|
||||
dependencies = [
|
||||
"core2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "blake2"
|
||||
@@ -651,12 +705,6 @@ dependencies = [
|
||||
"alloc-stdlib",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "built"
|
||||
version = "0.7.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b"
|
||||
|
||||
[[package]]
|
||||
name = "built"
|
||||
version = "0.8.0"
|
||||
@@ -689,15 +737,15 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495"
|
||||
|
||||
[[package]]
|
||||
name = "bytes"
|
||||
version = "1.10.1"
|
||||
version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
|
||||
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
|
||||
|
||||
[[package]]
|
||||
name = "bytesize"
|
||||
version = "2.1.0"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f5c434ae3cf0089ca203e9019ebe529c47ff45cefe8af7c85ecb734ef541822f"
|
||||
checksum = "c99fa31e08a43eaa5913ef68d7e01c37a2bdce6ed648168239ad33b7d30a9cd8"
|
||||
|
||||
[[package]]
|
||||
name = "bzip2-sys"
|
||||
@@ -740,16 +788,6 @@ dependencies = [
|
||||
"nom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-expr"
|
||||
version = "0.15.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02"
|
||||
dependencies = [
|
||||
"smallvec 1.15.1",
|
||||
"target-lexicon",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.4"
|
||||
@@ -793,9 +831,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.51"
|
||||
version = "4.5.53"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5"
|
||||
checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
@@ -812,9 +850,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.5.51"
|
||||
version = "4.5.53"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a"
|
||||
checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
@@ -902,7 +940,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"conduwuit_admin",
|
||||
@@ -934,7 +972,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_admin"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"conduwuit_api",
|
||||
@@ -956,7 +994,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_api"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum 0.7.9",
|
||||
@@ -989,14 +1027,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_build_metadata"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"built 0.8.0",
|
||||
"built",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_core"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"argon2",
|
||||
"arrayvec",
|
||||
@@ -1057,7 +1095,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_database"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"async-channel",
|
||||
"conduwuit_core",
|
||||
@@ -1076,7 +1114,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_macros"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"itertools 0.14.0",
|
||||
"proc-macro2",
|
||||
@@ -1086,7 +1124,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_router"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"axum 0.7.9",
|
||||
"axum-client-ip",
|
||||
@@ -1121,7 +1159,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_service"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
@@ -1162,7 +1200,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "conduwuit_web"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"askama",
|
||||
"axum 0.7.9",
|
||||
@@ -1269,6 +1307,15 @@ version = "0.8.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
|
||||
|
||||
[[package]]
|
||||
name = "core2"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core_affinity"
|
||||
version = "0.8.1"
|
||||
@@ -1431,9 +1478,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "ctor"
|
||||
version = "0.5.0"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67773048316103656a637612c4a62477603b777d91d9c62ff2290f9cde178fdb"
|
||||
checksum = "3ffc71fcdcdb40d6f087edddf7f8f1f8f79e6cf922f555a9ee8779752d4819bd"
|
||||
dependencies = [
|
||||
"ctor-proc-macro",
|
||||
"dtor",
|
||||
@@ -1441,9 +1488,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "ctor-proc-macro"
|
||||
version = "0.0.6"
|
||||
version = "0.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2"
|
||||
checksum = "52560adf09603e58c9a7ee1fe1dcb95a16927b17c127f0ac02d6e768a0e25bc1"
|
||||
|
||||
[[package]]
|
||||
name = "curve25519-dalek"
|
||||
@@ -1640,6 +1687,24 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs"
|
||||
version = "0.8.35"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs_io"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1cc3c5651fb62ab8aa3103998dade57efdd028544bd300516baa31840c252a83"
|
||||
dependencies = [
|
||||
"encoding_rs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "enum-as-inner"
|
||||
version = "0.6.1"
|
||||
@@ -1710,9 +1775,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "exr"
|
||||
version = "1.73.0"
|
||||
version = "1.74.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0"
|
||||
checksum = "4300e043a56aa2cb633c01af81ca8f699a321879a7854d3896a0ba89056363be"
|
||||
dependencies = [
|
||||
"bit_field",
|
||||
"half",
|
||||
@@ -1991,9 +2056,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "gif"
|
||||
version = "0.13.3"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ae047235e33e2829703574b54fdec96bfbad892062d97fed2f76022287de61b"
|
||||
checksum = "f954a9e9159ec994f73a30a12b96a702dde78f5547bcb561174597924f7d4162"
|
||||
dependencies = [
|
||||
"color_quant",
|
||||
"weezl",
|
||||
@@ -2455,9 +2520,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "image"
|
||||
version = "0.25.8"
|
||||
version = "0.25.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "529feb3e6769d234375c4cf1ee2ce713682b8e76538cb13f9fc23e1400a591e7"
|
||||
checksum = "e6506c6c10786659413faa717ceebcb8f70731c0a60cbae39795fdf114519c1a"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder-lite",
|
||||
@@ -2473,8 +2538,8 @@ dependencies = [
|
||||
"rayon",
|
||||
"rgb",
|
||||
"tiff",
|
||||
"zune-core",
|
||||
"zune-jpeg",
|
||||
"zune-core 0.5.0",
|
||||
"zune-jpeg 0.5.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2925,29 +2990,15 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
|
||||
|
||||
[[package]]
|
||||
name = "minicbor"
|
||||
version = "2.1.1"
|
||||
version = "2.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4f182275033b808ede9427884caa8e05fa7db930801759524ca7925bd8aa7a82"
|
||||
dependencies = [
|
||||
"minicbor-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "minicbor-derive"
|
||||
version = "0.18.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b17290c95158a760027059fe3f511970d6857e47ff5008f9e09bffe3d3e1c6af"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
checksum = "f9a1119e42fbacc2bb65d860de6eb7c930562bc71d42dca026d06b0228231f77"
|
||||
|
||||
[[package]]
|
||||
name = "minicbor-serde"
|
||||
version = "0.6.1"
|
||||
version = "0.6.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "546cc904f35809921fa57016a84c97e68d9d27c012e87b9dadc28c233705f783"
|
||||
checksum = "80047f75e28e3b38f6ab2ec3c2c7669f6b411fa6f8424e1a90a3fd784b19a3f4"
|
||||
dependencies = [
|
||||
"minicbor",
|
||||
"serde",
|
||||
@@ -2955,9 +3006,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "minimad"
|
||||
version = "0.13.1"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a9c5d708226d186590a7b6d4a9780e2bdda5f689e0d58cd17012a298efd745d2"
|
||||
checksum = "df8b688969b16915f3ecadc7829d5b7779dee4977e503f767f34136803d5c06f"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
@@ -3070,7 +3121,7 @@ version = "0.50.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
|
||||
dependencies = [
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3347,6 +3398,12 @@ version = "1.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
|
||||
|
||||
[[package]]
|
||||
name = "pastey"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "35fb2e5f958ec131621fdd531e9fc186ed768cbe395337403ae56c17a74c68ec"
|
||||
|
||||
[[package]]
|
||||
name = "pear"
|
||||
version = "0.2.9"
|
||||
@@ -3611,7 +3668,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"itertools 0.12.1",
|
||||
"itertools 0.14.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
@@ -3808,19 +3865,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rav1e"
|
||||
version = "0.7.1"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9"
|
||||
checksum = "43b6dd56e85d9483277cde964fd1bdb0428de4fec5ebba7540995639a21cb32b"
|
||||
dependencies = [
|
||||
"aligned-vec",
|
||||
"arbitrary",
|
||||
"arg_enum_proc_macro",
|
||||
"arrayvec",
|
||||
"av-scenechange",
|
||||
"av1-grain",
|
||||
"bitstream-io",
|
||||
"built 0.7.7",
|
||||
"built",
|
||||
"cfg-if",
|
||||
"interpolate_name",
|
||||
"itertools 0.12.1",
|
||||
"itertools 0.14.0",
|
||||
"libc",
|
||||
"libfuzzer-sys",
|
||||
"log",
|
||||
@@ -3829,23 +3888,21 @@ dependencies = [
|
||||
"noop_proc_macro",
|
||||
"num-derive",
|
||||
"num-traits",
|
||||
"once_cell",
|
||||
"paste",
|
||||
"profiling",
|
||||
"rand 0.8.5",
|
||||
"rand_chacha 0.3.1",
|
||||
"rand 0.9.2",
|
||||
"rand_chacha 0.9.0",
|
||||
"simd_helpers",
|
||||
"system-deps",
|
||||
"thiserror 1.0.69",
|
||||
"thiserror 2.0.17",
|
||||
"v_frame",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ravif"
|
||||
version = "0.11.20"
|
||||
version = "0.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5825c26fddd16ab9f515930d49028a630efec172e903483c94796cfe31893e6b"
|
||||
checksum = "ef69c1990ceef18a116855938e74793a5f7496ee907562bd0857b6ac734ab285"
|
||||
dependencies = [
|
||||
"avif-serialize",
|
||||
"imgref",
|
||||
@@ -3973,9 +4030,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "resolv-conf"
|
||||
version = "0.7.5"
|
||||
version = "0.7.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799"
|
||||
checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7"
|
||||
|
||||
[[package]]
|
||||
name = "rgb"
|
||||
@@ -4572,18 +4629,20 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde-saphyr"
|
||||
version = "0.0.7"
|
||||
version = "0.0.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fd76af9505b2498740576f95f60b3b4e2c469b5b677a8d2dd1d2da18b58193de"
|
||||
checksum = "9b9e06cddad47cc6214c0c456cf209b99a58b54223e7af2f6d4b88a5a9968499"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"base64 0.22.1",
|
||||
"encoding_rs_io",
|
||||
"nohash-hasher",
|
||||
"num-traits",
|
||||
"ryu",
|
||||
"saphyr-parser",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"smallvec 2.0.0-alpha.11",
|
||||
"smallvec 2.0.0-alpha.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4807,9 +4866,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "2.0.0-alpha.11"
|
||||
version = "2.0.0-alpha.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "87b96efa4bd6bdd2ff0c6615cc36fc4970cbae63cfd46ddff5cee35a1b4df570"
|
||||
checksum = "ef784004ca8777809dcdad6ac37629f0a97caee4c685fcea805278d81dd8b857"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
@@ -4901,9 +4960,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.109"
|
||||
version = "2.0.111"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f"
|
||||
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -4930,31 +4989,12 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "system-deps"
|
||||
version = "6.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349"
|
||||
dependencies = [
|
||||
"cfg-expr",
|
||||
"heck",
|
||||
"pkg-config",
|
||||
"toml 0.8.23",
|
||||
"version-compare",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tagptr"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
|
||||
|
||||
[[package]]
|
||||
name = "target-lexicon"
|
||||
version = "0.12.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
|
||||
|
||||
[[package]]
|
||||
name = "tendril"
|
||||
version = "0.4.3"
|
||||
@@ -4968,9 +5008,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "termimad"
|
||||
version = "0.34.0"
|
||||
version = "0.34.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "68ff5ca043d65d4ea43b65cdb4e3aba119657d0d12caf44f93212ec3168a8e20"
|
||||
checksum = "889a9370996b74cf46016ce35b96c248a9ac36d69aab1d112b3e09bc33affa49"
|
||||
dependencies = [
|
||||
"coolor",
|
||||
"crokey",
|
||||
@@ -5052,7 +5092,7 @@ dependencies = [
|
||||
"half",
|
||||
"quick-error",
|
||||
"weezl",
|
||||
"zune-jpeg",
|
||||
"zune-jpeg 0.4.21",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5366,9 +5406,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tower-http"
|
||||
version = "0.6.6"
|
||||
version = "0.6.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
|
||||
checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456"
|
||||
dependencies = [
|
||||
"async-compression",
|
||||
"bitflags",
|
||||
@@ -5402,9 +5442,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.41"
|
||||
version = "0.1.43"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
|
||||
checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
@@ -5413,9 +5453,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.30"
|
||||
version = "0.1.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
|
||||
checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -5424,9 +5464,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.34"
|
||||
version = "0.1.35"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
|
||||
checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"valuable",
|
||||
@@ -5445,9 +5485,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-journald"
|
||||
version = "0.3.1"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657"
|
||||
checksum = "2d3a81ed245bfb62592b1e2bc153e77656d94ee6a0497683a65a12ccaf2438d0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"tracing-core",
|
||||
@@ -5486,9 +5526,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-subscriber"
|
||||
version = "0.3.20"
|
||||
version = "0.3.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
|
||||
checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
|
||||
dependencies = [
|
||||
"matchers",
|
||||
"nu-ansi-term",
|
||||
@@ -5677,12 +5717,6 @@ version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "version-compare"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
@@ -6170,7 +6204,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "xtask"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"serde",
|
||||
@@ -6179,7 +6213,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "xtask-generate-commands"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
dependencies = [
|
||||
"clap-markdown",
|
||||
"clap_builder",
|
||||
@@ -6188,6 +6222,12 @@ dependencies = [
|
||||
"conduwuit_admin",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "y4m"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a5a4b21e1a62b67a2970e6831bc091d7b87e119e7f9791aef9702e3bef04448"
|
||||
|
||||
[[package]]
|
||||
name = "yansi"
|
||||
version = "1.0.1"
|
||||
@@ -6332,6 +6372,12 @@ version = "0.4.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a"
|
||||
|
||||
[[package]]
|
||||
name = "zune-core"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "111f7d9820f05fd715df3144e254d6fc02ee4088b0644c0ffd0efc9e6d9d2773"
|
||||
|
||||
[[package]]
|
||||
name = "zune-inflate"
|
||||
version = "0.2.54"
|
||||
@@ -6347,5 +6393,14 @@ version = "0.4.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "29ce2c8a9384ad323cf564b67da86e21d3cfdff87908bc1223ed5c99bc792713"
|
||||
dependencies = [
|
||||
"zune-core",
|
||||
"zune-core 0.4.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zune-jpeg"
|
||||
version = "0.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc6fb7703e32e9a07fb3f757360338b3a567a5054f21b5f52a666752e333d58e"
|
||||
dependencies = [
|
||||
"zune-core 0.5.0",
|
||||
]
|
||||
|
||||
24
Cargo.toml
24
Cargo.toml
@@ -21,7 +21,7 @@ license = "Apache-2.0"
|
||||
readme = "README.md"
|
||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
rust-version = "1.86.0"
|
||||
version = "0.5.0-rc.8"
|
||||
version = "0.5.0-rc.8.1"
|
||||
|
||||
[workspace.metadata.crane]
|
||||
name = "conduwuit"
|
||||
@@ -48,7 +48,7 @@ features = ["ffi", "std", "union"]
|
||||
version = "0.7.0"
|
||||
|
||||
[workspace.dependencies.ctor]
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
|
||||
[workspace.dependencies.cargo_toml]
|
||||
version = "0.22"
|
||||
@@ -167,7 +167,7 @@ features = ["raw_value"]
|
||||
|
||||
# Used for appservice registration files
|
||||
[workspace.dependencies.serde-saphyr]
|
||||
version = "0.0.7"
|
||||
version = "0.0.10"
|
||||
|
||||
# Used to load forbidden room/user regex from config
|
||||
[workspace.dependencies.serde_regex]
|
||||
@@ -670,24 +670,6 @@ panic = "abort"
|
||||
inherits = "release"
|
||||
strip = "symbols"
|
||||
lto = "fat"
|
||||
#rustflags = [
|
||||
# '-Ctarget-cpu=native',
|
||||
# '-Ztune-cpu=native',
|
||||
# '-Ctarget-feature=+crt-static',
|
||||
# '-Crelocation-model=static',
|
||||
# '-Ztls-model=local-exec',
|
||||
# '-Zinline-in-all-cgus=true',
|
||||
# '-Zinline-mir=true',
|
||||
# '-Zmir-opt-level=3',
|
||||
# '-Clink-arg=-fuse-ld=gold',
|
||||
# '-Clink-arg=-Wl,--threads',
|
||||
# '-Clink-arg=-Wl,--gc-sections',
|
||||
# '-Clink-arg=-luring',
|
||||
# '-Clink-arg=-lstdc++',
|
||||
# '-Clink-arg=-lc',
|
||||
# '-Ztime-passes',
|
||||
# '-Ztime-llvm-passes',
|
||||
#]
|
||||
|
||||
[profile.release-max-perf.build-override]
|
||||
inherits = "release-max-perf"
|
||||
|
||||
@@ -11,7 +11,7 @@ ## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
|
||||
<!-- ANCHOR_END: catchphrase -->
|
||||
|
||||
[continuwuity] is a Matrix homeserver written in Rust.
|
||||
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
|
||||
It's the official community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
|
||||
|
||||
<!-- ANCHOR: body -->
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ ### Responsible Disclosure
|
||||
|
||||
1. **Contact members of the team directly** over E2EE private message.
|
||||
- [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
|
||||
- [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
|
||||
- [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk)
|
||||
2. **Email the security team** at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
|
||||
3. **Do not disclose the vulnerability publicly** until it has been addressed
|
||||
4. **Provide detailed information** about the vulnerability, including:
|
||||
|
||||
@@ -4,7 +4,6 @@ description = "continuwuity is a community continuation of the conduwuit Matrix
|
||||
language = "en"
|
||||
authors = ["The continuwuity Community"]
|
||||
text-direction = "ltr"
|
||||
multilingual = false
|
||||
src = "docs"
|
||||
|
||||
[build]
|
||||
|
||||
@@ -1 +1 @@
|
||||
docs/development.md
|
||||
docs/development/index.mdx
|
||||
@@ -48,7 +48,7 @@ EOF
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.15.10
|
||||
ENV BINSTALL_VERSION=1.16.2
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
||||
@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.15.10
|
||||
ENV BINSTALL_VERSION=1.16.2
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
# Summary
|
||||
|
||||
- [Introduction](introduction.md)
|
||||
- [Configuration](configuration.md)
|
||||
- [Examples](configuration/examples.md)
|
||||
- [Deploying](deploying.md)
|
||||
- [Generic](deploying/generic.md)
|
||||
- [NixOS](deploying/nixos.md)
|
||||
- [Docker](deploying/docker.md)
|
||||
- [Kubernetes](deploying/kubernetes.md)
|
||||
- [Arch Linux](deploying/arch-linux.md)
|
||||
- [Debian](deploying/debian.md)
|
||||
- [Fedora](deploying/fedora.md)
|
||||
- [FreeBSD](deploying/freebsd.md)
|
||||
- [TURN](turn.md)
|
||||
- [Appservices](appservices.md)
|
||||
- [Maintenance](maintenance.md)
|
||||
- [Troubleshooting](troubleshooting.md)
|
||||
- [Admin Command Reference](admin_reference.md)
|
||||
- [Development](development.md)
|
||||
- [Contributing](contributing.md)
|
||||
- [Code Style Guide](development/code_style.md)
|
||||
- [Testing](development/testing.md)
|
||||
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
||||
- [Community (and Guidelines)](community.md)
|
||||
- [Security](security.md)
|
||||
74
docs/_meta.json
Normal file
74
docs/_meta.json
Normal file
@@ -0,0 +1,74 @@
|
||||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "introduction",
|
||||
"label": "Continuwuity"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "configuration",
|
||||
"label": "Configuration"
|
||||
},
|
||||
{
|
||||
"type": "dir",
|
||||
"name": "deploying",
|
||||
"label": "Deploying"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "turn",
|
||||
"label": "TURN"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "appservices",
|
||||
"label": "Appservices"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "maintenance",
|
||||
"label": "Maintenance"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "troubleshooting",
|
||||
"label": "Troubleshooting"
|
||||
},
|
||||
{
|
||||
"type": "divider"
|
||||
},
|
||||
{
|
||||
"type": "dir-section-header",
|
||||
"name": "development",
|
||||
"label": "Development",
|
||||
"collapsible": true,
|
||||
"collapsed": false
|
||||
},
|
||||
{
|
||||
"type": "divider"
|
||||
},
|
||||
{
|
||||
"type": "section-header",
|
||||
"label": "Reference"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"label": "Configuration Reference",
|
||||
"name": "/reference/config"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"label": "Admin Command Reference",
|
||||
"name": "/reference/admin"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"label": "Server Reference",
|
||||
"name": "/reference/server"
|
||||
},
|
||||
{
|
||||
"type": "divider"
|
||||
},
|
||||
"community",
|
||||
"security"
|
||||
]
|
||||
37
docs/_nav.json
Normal file
37
docs/_nav.json
Normal file
@@ -0,0 +1,37 @@
|
||||
[
|
||||
{
|
||||
"text": "Guide",
|
||||
"link": "/introduction",
|
||||
"activeMatch": "^/(introduction|configuration|deploying|turn|appservices|maintenance|troubleshooting)"
|
||||
},
|
||||
{
|
||||
"text": "Development",
|
||||
"link": "/development/index",
|
||||
"activeMatch": "^/development/"
|
||||
},
|
||||
{
|
||||
"text": "Reference",
|
||||
"items": [
|
||||
{
|
||||
"text": "Configuration Reference",
|
||||
"link": "/reference/config"
|
||||
},
|
||||
{
|
||||
"text": "Admin Command Reference",
|
||||
"link": "/reference/admin"
|
||||
},
|
||||
{
|
||||
"text": "Server Reference",
|
||||
"link": "/reference/server"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"text": "Community",
|
||||
"link": "/community"
|
||||
},
|
||||
{
|
||||
"text": "Security",
|
||||
"link": "/security"
|
||||
}
|
||||
]
|
||||
@@ -1,36 +0,0 @@
|
||||
<svg
|
||||
version="1.1"
|
||||
id="Layer_1"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
x="0px"
|
||||
y="0px"
|
||||
width="100%"
|
||||
viewBox="0 0 864 864"
|
||||
enableBackground="new 0 0 864 864"
|
||||
xmlSpace="preserve"
|
||||
>
|
||||
<path
|
||||
fill="#EC008C"
|
||||
opacity="1.000000"
|
||||
stroke="none"
|
||||
d="M0.999997,649.000000 C1.000000,433.052795 1.000000,217.105591 1.000000,1.079198 C288.876801,1.079198 576.753601,1.079198 865.000000,1.079198 C865.000000,73.025414 865.000000,145.051453 864.634888,217.500671 C852.362488,223.837280 840.447632,229.735275 828.549438,235.666794 C782.143677,258.801056 735.743225,281.945923 688.998657,304.980469 C688.122009,304.476532 687.580750,304.087708 687.053894,303.680206 C639.556946,266.944733 573.006775,291.446869 560.804199,350.179443 C560.141357,353.369446 559.717590,356.609131 559.195374,359.748962 C474.522705,359.748962 390.283478,359.748962 306.088135,359.748962 C298.804138,318.894806 265.253357,295.206024 231.834442,293.306793 C201.003021,291.554596 169.912033,310.230042 156.935104,338.792725 C149.905151,354.265930 147.884064,370.379944 151.151794,387.034515 C155.204453,407.689667 166.300507,423.954224 183.344437,436.516663 C181.938263,437.607025 180.887405,438.409576 179.849426,439.228516 C147.141953,465.032562 139.918045,510.888947 163.388611,545.322632 C167.274551,551.023804 172.285187,555.958313 176.587341,561.495728 C125.846893,587.012817 75.302292,612.295532 24.735992,637.534790 C16.874903,641.458496 8.914484,645.183228 0.999997,649.000000 z"
|
||||
/>
|
||||
<path
|
||||
fill="#000000"
|
||||
opacity="1.000000"
|
||||
stroke="none"
|
||||
d="M689.340759,305.086823 C735.743225,281.945923 782.143677,258.801056 828.549438,235.666794 C840.447632,229.735275 852.362488,223.837280 864.634888,217.961929 C865.000000,433.613190 865.000000,649.226379 865.000000,864.919800 C577.000000,864.919800 289.000000,864.919800 1.000000,864.919800 C1.000000,793.225708 1.000000,721.576721 0.999997,649.463867 C8.914484,645.183228 16.874903,641.458496 24.735992,637.534790 C75.302292,612.295532 125.846893,587.012817 176.939667,561.513062 C178.543060,562.085083 179.606812,562.886414 180.667526,563.691833 C225.656799,597.853394 291.232574,574.487244 304.462524,519.579773 C304.989105,517.394409 305.501068,515.205505 305.984619,513.166748 C391.466370,513.166748 476.422729,513.166748 561.331177,513.166748 C573.857727,555.764343 608.978149,572.880920 638.519897,572.672791 C671.048340,572.443665 700.623230,551.730408 711.658752,520.910583 C722.546875,490.502106 715.037842,453.265564 682.776733,429.447052 C683.966064,428.506866 685.119507,427.602356 686.265320,426.688232 C712.934143,405.412262 723.011475,370.684631 711.897339,338.686676 C707.312805,325.487671 699.185303,314.725128 689.340759,305.086823 z"
|
||||
/>
|
||||
<path
|
||||
fill="#FEFBFC"
|
||||
opacity="1.000000"
|
||||
stroke="none"
|
||||
d="M688.998657,304.980469 C699.185303,314.725128 707.312805,325.487671 711.897339,338.686676 C723.011475,370.684631 712.934143,405.412262 686.265320,426.688232 C685.119507,427.602356 683.966064,428.506866 682.776733,429.447052 C715.037842,453.265564 722.546875,490.502106 711.658752,520.910583 C700.623230,551.730408 671.048340,572.443665 638.519897,572.672791 C608.978149,572.880920 573.857727,555.764343 561.331177,513.166748 C476.422729,513.166748 391.466370,513.166748 305.984619,513.166748 C305.501068,515.205505 304.989105,517.394409 304.462524,519.579773 C291.232574,574.487244 225.656799,597.853394 180.667526,563.691833 C179.606812,562.886414 178.543060,562.085083 177.128418,561.264465 C172.285187,555.958313 167.274551,551.023804 163.388611,545.322632 C139.918045,510.888947 147.141953,465.032562 179.849426,439.228516 C180.887405,438.409576 181.938263,437.607025 183.344437,436.516663 C166.300507,423.954224 155.204453,407.689667 151.151794,387.034515 C147.884064,370.379944 149.905151,354.265930 156.935104,338.792725 C169.912033,310.230042 201.003021,291.554596 231.834442,293.306793 C265.253357,295.206024 298.804138,318.894806 306.088135,359.748962 C390.283478,359.748962 474.522705,359.748962 559.195374,359.748962 C559.717590,356.609131 560.141357,353.369446 560.804199,350.179443 C573.006775,291.446869 639.556946,266.944733 687.053894,303.680206 C687.580750,304.087708 688.122009,304.476532 688.998657,304.980469 M703.311279,484.370789 C698.954468,457.053253 681.951416,440.229645 656.413696,429.482330 C673.953552,421.977875 688.014709,412.074219 696.456482,395.642365 C704.862061,379.280853 706.487793,362.316345 700.947998,344.809204 C691.688965,315.548492 664.183716,296.954437 633.103516,298.838257 C618.467957,299.725372 605.538086,305.139557 594.588501,314.780121 C577.473999,329.848511 570.185486,349.121399 571.838501,371.750854 C479.166595,371.750854 387.082886,371.750854 294.582672,371.750854 C293.993011,354.662048 288.485260,339.622314 276.940491,327.118439 
C265.392609,314.611176 251.082092,307.205322 234.093262,305.960541 C203.355347,303.708374 176.337585,320.898438 166.089890,348.816620 C159.557541,366.613007 160.527206,384.117401 168.756042,401.172516 C177.054779,418.372589 191.471954,428.832886 207.526581,435.632172 C198.407059,442.272583 188.815598,448.302246 180.383728,455.660675 C171.685028,463.251984 166.849655,473.658661 163.940216,484.838684 C161.021744,496.053375 161.212982,507.259705 164.178833,518.426208 C171.577927,546.284302 197.338104,566.588867 226.001465,567.336853 C240.828415,567.723816 254.357819,563.819092 266.385468,555.199646 C284.811554,541.994751 293.631104,523.530579 294.687347,501.238312 C387.354828,501.238312 479.461304,501.238312 571.531799,501.238312 C577.616638,543.189026 615.312866,566.342102 651.310059,559.044739 C684.973938,552.220398 708.263306,519.393127 703.311279,484.370789 z"
|
||||
/>
|
||||
<path
|
||||
fill="#EC008C"
|
||||
opacity="1.000000"
|
||||
stroke="none"
|
||||
d="M703.401855,484.804718 C708.263306,519.393127 684.973938,552.220398 651.310059,559.044739 C615.312866,566.342102 577.616638,543.189026 571.531799,501.238312 C479.461304,501.238312 387.354828,501.238312 294.687347,501.238312 C293.631104,523.530579 284.811554,541.994751 266.385468,555.199646 C254.357819,563.819092 240.828415,567.723816 226.001465,567.336853 C197.338104,566.588867 171.577927,546.284302 164.178833,518.426208 C161.212982,507.259705 161.021744,496.053375 163.940216,484.838684 C166.849655,473.658661 171.685028,463.251984 180.383728,455.660675 C188.815598,448.302246 198.407059,442.272583 207.526581,435.632172 C191.471954,428.832886 177.054779,418.372589 168.756042,401.172516 C160.527206,384.117401 159.557541,366.613007 166.089890,348.816620 C176.337585,320.898438 203.355347,303.708374 234.093262,305.960541 C251.082092,307.205322 265.392609,314.611176 276.940491,327.118439 C288.485260,339.622314 293.993011,354.662048 294.582672,371.750854 C387.082886,371.750854 479.166595,371.750854 571.838501,371.750854 C570.185486,349.121399 577.473999,329.848511 594.588501,314.780121 C605.538086,305.139557 618.467957,299.725372 633.103516,298.838257 C664.183716,296.954437 691.688965,315.548492 700.947998,344.809204 C706.487793,362.316345 704.862061,379.280853 696.456482,395.642365 C688.014709,412.074219 673.953552,421.977875 656.413696,429.482330 C681.951416,440.229645 698.954468,457.053253 703.401855,484.804718 z"
|
||||
/>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 7.0 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 11 KiB |
@@ -8,7 +8,7 @@ ## Basics
|
||||
setting individual config options via commandline.
|
||||
|
||||
Please refer to the [example config
|
||||
file](./configuration/examples.md#example-configuration) for all of those
|
||||
file](./reference/config.mdx) for all of those
|
||||
settings.
|
||||
|
||||
The config file to use can be specified on the commandline when running
|
||||
@@ -1,19 +0,0 @@
|
||||
## Example configuration
|
||||
|
||||
<details>
|
||||
<summary>Example configuration</summary>
|
||||
|
||||
```toml
|
||||
{{#include ../../conduwuit-example.toml}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## systemd unit file
|
||||
|
||||
<details>
|
||||
<summary>systemd unit file</summary>
|
||||
|
||||
```
|
||||
{{#include ../../pkg/conduwuit.service}}
|
||||
```
|
||||
42
docs/deploying/_meta.json
Normal file
42
docs/deploying/_meta.json
Normal file
@@ -0,0 +1,42 @@
|
||||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "generic",
|
||||
"label": "Generic"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "docker",
|
||||
"label": "Docker"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "debian",
|
||||
"label": "Debian"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "fedora",
|
||||
"label": "Fedora"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "nixos",
|
||||
"label": "NixOS"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "arch-linux",
|
||||
"label": "Arch Linux"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "kubernetes",
|
||||
"label": "Kubernetes"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "freebsd",
|
||||
"label": "FreeBSD"
|
||||
}
|
||||
]
|
||||
@@ -1 +0,0 @@
|
||||
{{#include ../../pkg/debian/README.md}}
|
||||
1
docs/deploying/debian.mdx
Symbolic link
1
docs/deploying/debian.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../../pkg/debian/README.md
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
services:
|
||||
homeserver:
|
||||
### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image,
|
||||
### If you already built the continuwuity image with 'docker build' or want to use the Docker Hub image,
|
||||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
|
||||
@@ -40,10 +40,10 @@ ### Run
|
||||
|
||||
The `-d` flag lets the container run in detached mode. You may supply an
|
||||
optional `continuwuity.toml` config file, the example config can be found
|
||||
[here](../configuration/examples.md). You can pass in different env vars to
|
||||
[here](../reference/config.mdx). You can pass in different env vars to
|
||||
change config values on the fly. You can even configure Continuwuity completely by
|
||||
using env vars. For an overview of possible values, please take a look at the
|
||||
[`docker-compose.yml`](docker-compose.yml) file.
|
||||
<a href="/examples/docker-compose.yml" target="_blank">`docker-compose.yml`</a> file.
|
||||
|
||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
||||
flag, which cleans up everything related to your container after you stop
|
||||
@@ -56,14 +56,62 @@ ### Docker-compose
|
||||
|
||||
Depending on your proxy setup, you can use one of the following files:
|
||||
|
||||
- If you already have a `traefik` instance set up, use
|
||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
|
||||
- If you don't have a `traefik` instance set up and would like to use it, use
|
||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
|
||||
- If you want a setup that works out of the box with `caddy-docker-proxy`, use
|
||||
[`docker-compose.with-caddy.yml`](docker-compose.with-caddy.yml) and replace all
|
||||
`example.com` placeholders with your own domain
|
||||
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
|
||||
### For existing Traefik setup
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.for-traefik.yml</summary>
|
||||
|
||||
```yaml file="./docker-compose.for-traefik.yml"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### With Traefik included
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-traefik.yml</summary>
|
||||
|
||||
```yaml file="./docker-compose.with-traefik.yml"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### With Caddy Docker Proxy
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-caddy.yml</summary>
|
||||
|
||||
Replace all `example.com` placeholders with your own domain.
|
||||
|
||||
```yaml file="./docker-compose.with-caddy.yml"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### For other reverse proxies
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.yml</summary>
|
||||
|
||||
```yaml file="./docker-compose.yml"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Override file
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.override.yml</summary>
|
||||
|
||||
```yaml file="./docker-compose.override.yml"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
When picking the Traefik-related compose file, rename it to
|
||||
`docker-compose.yml`, and rename the override file to
|
||||
@@ -80,7 +128,7 @@ ### Docker-compose
|
||||
After that, you can rename it to `docker-compose.yml` and spin up the
|
||||
containers!
|
||||
|
||||
Additional info about deploying Continuwuity can be found [here](generic.md).
|
||||
Additional info about deploying Continuwuity can be found [here](generic.mdx).
|
||||
|
||||
### Build
|
||||
|
||||
@@ -88,7 +136,18 @@ ### Build
|
||||
|
||||
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
|
||||
|
||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
|
||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata.
|
||||
|
||||
<details>
|
||||
<summary>Click to view the Dockerfile</summary>
|
||||
|
||||
You can also <a href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile" target="_blank">view the Dockerfile on Forgejo</a>.
|
||||
|
||||
```dockerfile file="../../docker/Dockerfile"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
||||
|
||||
@@ -105,7 +164,7 @@ # docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=na
|
||||
|
||||
Refer to the Docker Buildx documentation for more advanced build options.
|
||||
|
||||
[dockerfile-path]: ../../docker/Dockerfile
|
||||
[dockerfile-path]: https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||
|
||||
### Run
|
||||
|
||||
@@ -123,10 +182,7 @@ ### Use Traefik as Proxy
|
||||
|
||||
As a container user, you probably know about Traefik. It is an easy-to-use
|
||||
reverse proxy for making containerized apps and services available through the
|
||||
web. With the two provided files,
|
||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
||||
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
|
||||
web. With the Traefik-related docker-compose files provided above, it is equally easy
|
||||
to deploy and use Continuwuity, with a small caveat. If you have already looked at
|
||||
the files, you should have seen the `well-known` service, which is the
|
||||
small caveat. Traefik is simply a proxy and load balancer and cannot
|
||||
@@ -97,8 +97,7 @@ ## Forwarding ports in the firewall or the router
|
||||
|
||||
## Setting up a systemd service
|
||||
|
||||
You can find two example systemd units for Continuwuity
|
||||
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
||||
You can find an example unit for continuwuity below.
|
||||
You may need to change the `ExecStart=` path to match where you placed the Continuwuity
|
||||
binary if it is not in `/usr/bin/conduwuit`.
|
||||
|
||||
@@ -117,11 +116,26 @@ ## Setting up a systemd service
|
||||
ReadWritePaths=/path/to/custom/database/path
|
||||
```
|
||||
|
||||
|
||||
### Example systemd Unit File
|
||||
|
||||
<details>
|
||||
<summary>Click to expand systemd unit file (conduwuit.service)</summary>
|
||||
|
||||
|
||||
```ini file="../../pkg/conduwuit.service"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
You can also [view the file on Foregejo](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/pkg/conduwuit.service).
|
||||
|
||||
## Creating the Continuwuity configuration file
|
||||
|
||||
Now you need to create the Continuwuity configuration file in
|
||||
`/etc/continuwuity/continuwuity.toml`. You can find an example configuration at
|
||||
[conduwuit-example.toml](../configuration/examples.md).
|
||||
[conduwuit-example.toml](../reference/config.mdx).
|
||||
|
||||
**Please take a moment to read the config. You need to change at least the
|
||||
server name.**
|
||||
@@ -156,7 +170,7 @@ ### Caddy
|
||||
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
|
||||
and enter the following (substitute your actual server name):
|
||||
|
||||
```caddyfile
|
||||
```
|
||||
your.server.name, your.server.name:8448 {
|
||||
# TCP reverse_proxy
|
||||
reverse_proxy 127.0.0.1:6167
|
||||
@@ -193,8 +207,10 @@ ### Other Reverse Proxies
|
||||
- [`/.well-known/matrix/support`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixsupport)
|
||||
|
||||
Examples of delegation:
|
||||
- <https://puppygock.gay/.well-known/matrix/server>
|
||||
- <https://puppygock.gay/.well-known/matrix/client>
|
||||
- https://continuwuity.org/.well-known/matrix/server
|
||||
- https://continuwuity.org/.well-known/matrix/client
|
||||
- https://ellis.link/.well-known/matrix/server
|
||||
- https://ellis.link/.well-known/matrix/client
|
||||
|
||||
For Apache and Nginx there are many examples available online.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Continuwuity for Kubernetes
|
||||
|
||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
||||
natively. However, a community-maintained Helm Chart is available here to run
|
||||
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
||||
natively. However, [a community-maintained Helm Chart is available here to run
|
||||
conduwuit on Kubernetes](https://gitlab.cronce.io/charts/conduwuit)
|
||||
|
||||
This should be compatible with Continuwuity, but you will need to change the image reference.
|
||||
|
||||
@@ -48,7 +48,7 @@ ### Available options
|
||||
- `package`: The Continuwuity package to use
|
||||
- `settings`: The Continuwuity configuration (in TOML format)
|
||||
|
||||
Use the `settings` option to configure Continuwuity itself. See the [example configuration file](../configuration/examples.md#example-configuration) for all available options.
|
||||
Use the `settings` option to configure Continuwuity itself. See the [example configuration file](../reference/config.mdx) for all available options.
|
||||
|
||||
### UNIX sockets
|
||||
|
||||
27
docs/development/_meta.json
Normal file
27
docs/development/_meta.json
Normal file
@@ -0,0 +1,27 @@
|
||||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "index",
|
||||
"label": "Development Guide"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "contributing",
|
||||
"label": "Contributing"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "code_style",
|
||||
"label": "Code Style Guide"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "testing",
|
||||
"label": "Testing"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "hot_reload",
|
||||
"label": "Hot Reloading"
|
||||
}
|
||||
]
|
||||
173
docs/development/contributing.mdx
Normal file
173
docs/development/contributing.mdx
Normal file
@@ -0,0 +1,173 @@
|
||||
# Contributing guide
|
||||
|
||||
This page is about contributing to Continuwuity. The
|
||||
[development](./index.mdx) and [code style guide](./code_style.mdx) pages may be of interest for you as well.
|
||||
|
||||
If you would like to work on an [issue][issues] that is not assigned, preferably
|
||||
ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
|
||||
and comment on it.
|
||||
|
||||
### Code Style
|
||||
|
||||
Please review and follow the [code style guide](./code_style) for formatting, linting, naming conventions, and other code standards.
|
||||
|
||||
### Pre-commit Checks
|
||||
|
||||
Continuwuity uses pre-commit hooks to enforce various coding standards and catch common issues before they're committed. These checks include:
|
||||
|
||||
- Code formatting and linting
|
||||
- Typo detection (both in code and commit messages)
|
||||
- Checking for large files
|
||||
- Ensuring proper line endings and no trailing whitespace
|
||||
- Validating YAML, JSON, and TOML files
|
||||
- Checking for merge conflicts
|
||||
|
||||
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
|
||||
|
||||
|
||||
```bash
|
||||
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
|
||||
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
||||
|
||||
# Install prefligit using cargo-binstall
|
||||
cargo binstall prefligit
|
||||
|
||||
# Install git hooks to run checks automatically
|
||||
prefligit install
|
||||
|
||||
# Run all checks
|
||||
prefligit --all-files
|
||||
```
|
||||
|
||||
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
||||
```bash
|
||||
# Requires python
|
||||
|
||||
# Install pre-commit
|
||||
pip install pre-commit
|
||||
|
||||
# Install the hooks
|
||||
pre-commit install
|
||||
|
||||
# Run all checks manually
|
||||
pre-commit run --all-files
|
||||
```
|
||||
|
||||
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||
|
||||
### Running tests locally
|
||||
|
||||
Tests, compilation, and linting can be run with standard Cargo commands:
|
||||
|
||||
```bash
|
||||
# Run tests
|
||||
cargo test
|
||||
|
||||
# Check compilation
|
||||
cargo check --workspace --features full
|
||||
|
||||
# Run lints
|
||||
cargo clippy --workspace --features full
|
||||
# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;
|
||||
|
||||
# Format code (must use nightly)
|
||||
cargo +nightly fmt
|
||||
```
|
||||
|
||||
### Matrix tests
|
||||
|
||||
Continuwuity uses [Complement][complement] for Matrix protocol compliance testing. Complement tests are run manually by developers, and documentation on how to run these tests locally is currently being developed.
|
||||
|
||||
If your changes are done to fix Matrix tests, please note that in your pull request. If more Complement tests start failing from your changes, please review the logs and determine if they're intended or not.
|
||||
|
||||
[Sytest][sytest] is currently unsupported.
|
||||
|
||||
### Writing documentation
|
||||
|
||||
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
|
||||
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
|
||||
directory at the top level.
|
||||
|
||||
To build the documentation locally:
|
||||
|
||||
1. Install mdbook if you don't have it already:
|
||||
```bash
|
||||
cargo install mdbook # or cargo binstall, or another method
|
||||
```
|
||||
|
||||
2. Build the documentation:
|
||||
```bash
|
||||
mdbook build
|
||||
```
|
||||
|
||||
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
|
||||
|
||||
|
||||
### Commit Messages
|
||||
|
||||
Continuwuity follows the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. This provides a standardized format that makes the commit history more readable and enables automated tools to generate changelogs.
|
||||
|
||||
The basic structure is:
|
||||
|
||||
```
|
||||
<type>[(optional scope)]: <description>
|
||||
|
||||
[optional body]
|
||||
|
||||
[optional footer(s)]
|
||||
```
|
||||
|
||||
The allowed types for commits are:
|
||||
- `fix`: Bug fixes
|
||||
- `feat`: New features
|
||||
- `docs`: Documentation changes
|
||||
- `style`: Changes that don't affect the meaning of the code (formatting, etc.)
|
||||
- `refactor`: Code changes that neither fix bugs nor add features
|
||||
- `perf`: Performance improvements
|
||||
- `test`: Adding or fixing tests
|
||||
- `build`: Changes to the build system or dependencies
|
||||
- `ci`: Changes to CI configuration
|
||||
- `chore`: Other changes that don't modify source or test files
|
||||
|
||||
Examples:
|
||||
```
|
||||
feat: add user authentication
|
||||
fix(database): resolve connection pooling issue
|
||||
docs: update installation instructions
|
||||
```
|
||||
|
||||
The project uses the `committed` hook to validate commit messages in pre-commit. This ensures all commits follow the conventional format.
|
||||
|
||||
### Creating pull requests
|
||||
|
||||
Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity
|
||||
allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
|
||||
manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
|
||||
This prevents us from having to ping once in a while to double check the status
|
||||
of it, especially when the CI completed successfully and everything so it
|
||||
*looks* done.
|
||||
|
||||
Before submitting a pull request, please ensure:
|
||||
1. Your code passes all CI checks (formatting, linting, typo detection, etc.)
|
||||
2. Your code follows the [code style guide](./code_style)
|
||||
3. Your commit messages follow the conventional commits format
|
||||
4. Tests are added for new functionality
|
||||
5. Documentation is updated if needed
|
||||
|
||||
Direct all PRs/MRs to the `main` branch.
|
||||
|
||||
By sending a pull request or patch, you are agreeing that your changes are
|
||||
allowed to be licenced under the Apache-2.0 licence and all of your conduct is
|
||||
in line with the Contributor's Covenant, and continuwuity's Code of Conduct.
|
||||
|
||||
Contribution by users who violate either of these code of conducts may not have
|
||||
their contributions accepted. This includes users who have been banned from
|
||||
continuwuity Matrix rooms for Code of Conduct violations.
|
||||
|
||||
[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
|
||||
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
||||
[complement]: https://github.com/matrix-org/complement/
|
||||
[sytest]: https://github.com/matrix-org/sytest/
|
||||
[mdbook]: https://rust-lang.github.io/mdBook/
|
||||
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
||||
@@ -137,7 +137,7 @@ ### Addendum
|
||||
it.**
|
||||
|
||||

|
||||
Volk](./assets/libraries.png)
|
||||
|
||||
When a symbol is referenced between crates they become bound: **crates cannot be
|
||||
unloaded until their calling crates are first unloaded.** Thus we start the
|
||||
@@ -148,7 +148,7 @@ ### Addendum
|
||||
binding ever occurs between them.
|
||||
|
||||

|
||||
Volk](./assets/reload_order.png)
|
||||
|
||||
Proper resource management is essential for reliable reloading to occur. This is
|
||||
a very basic ask in RAII-idiomatic Rust and the exposure to reloading hazards is
|
||||
@@ -2,7 +2,7 @@ # Development
|
||||
|
||||
Information about developing the project. If you are only interested in using
|
||||
it, you can safely ignore this page. If you plan on contributing, see the
|
||||
[contributor's guide](./contributing.md) and [code style guide](./development/code_style.md).
|
||||
[contributor's guide](./contributing.mdx) and [code style guide](./code_style.mdx).
|
||||
|
||||
## Continuwuity project layout
|
||||
|
||||
@@ -24,7 +24,7 @@ ## Complement
|
||||
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
|
||||
|
||||
We have a Complement fork as some tests have needed to be fixed. This can be found
|
||||
at: <https://forgejo.ellis.link/continuwuation/complement>
|
||||
at [continuwuation/complement](https://forgejo.ellis.link/continuwuation/complement)
|
||||
|
||||
[ci-workflows]:
|
||||
https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1
|
||||
51
docs/index.mdx
Normal file
51
docs/index.mdx
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
pageType: home
|
||||
|
||||
hero:
|
||||
name: Continuwuity
|
||||
text: A community-driven Matrix homeserver
|
||||
tagline: Fast, lightweight and open
|
||||
actions:
|
||||
- theme: brand
|
||||
text: Get Started
|
||||
link: /introduction
|
||||
- theme: alt
|
||||
text: Contribute on Forgejo
|
||||
link: https://forgejo.ellis.link/continuwuation/continuwuity
|
||||
- theme: alt
|
||||
text: Star on GitHub
|
||||
link: https://github.com/continuwuity/continuwuity
|
||||
image:
|
||||
src: /assets/logo.svg
|
||||
alt: continuwuity logo
|
||||
|
||||
features:
|
||||
- title: 🚀 High Performance
|
||||
details: Built with Rust for exceptional speed and efficiency. Designed to run smoothly even on modest hardware.
|
||||
- title: 🔒 Secure by Default
|
||||
details: Memory-safe Rust implementation with built-in security features to protect your communication.
|
||||
- title: 🌐 Matrix Protocol
|
||||
details: Fully compatible with the Matrix ecosystem. Connect with users across the federated network.
|
||||
- title: 🛠️ Community Maintained
|
||||
details: Actively developed by a dedicated community of Matrix enthusiasts and contributors.
|
||||
- title: 📦 Easy to Deploy
|
||||
details: Multiple deployment options including Docker, NixOS, and traditional package managers.
|
||||
- title: 🔌 Appservice Support
|
||||
details: Bridge to other platforms like Discord, Telegram, and more with Matrix appservices.
|
||||
|
||||
doc: false
|
||||
---
|
||||
|
||||
## What is Continuwuity?
|
||||
|
||||
Continuwuity is a Matrix homeserver.
|
||||
|
||||
Matrix is an open chat network that lets anyone talk to anyone, no matter what server or address they use - sort of like email.
|
||||
|
||||
Continuwuity receives and keeps track of all your messages, and delivers what you send to the right people.
|
||||
|
||||
## Why is Continuwuity different?
|
||||
|
||||
Continuwuity is light and fast, using a fraction of the memory of other major homeservers. It's also simple to set up, and secure by default.
|
||||
|
||||
We are a community run project, filled with diverse and friendly people. Everything is built by people who care about the project volunteering their free time.
|
||||
@@ -1,18 +0,0 @@
|
||||
# Continuwuity
|
||||
|
||||
{{#include ../README.md:catchphrase}}
|
||||
|
||||
{{#include ../README.md:body}}
|
||||
|
||||
#### How can I deploy my own?
|
||||
|
||||
- [Deployment options](deploying.md)
|
||||
|
||||
If you want to connect an appservice to Continuwuity, take a look at the
|
||||
[appservices documentation](appservices.md).
|
||||
|
||||
#### How can I contribute?
|
||||
|
||||
See the [contributor's guide](contributing.md)
|
||||
|
||||
{{#include ../README.md:footer}}
|
||||
92
docs/introduction.mdx
Normal file
92
docs/introduction.mdx
Normal file
@@ -0,0 +1,92 @@
|
||||
# Continuwuity
|
||||
|
||||
## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
|
||||
|
||||
[](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) [](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
|
||||
|
||||
[continuwuity] is a Matrix homeserver written in Rust.
|
||||
It's the official community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
|
||||
|
||||
[](https://forgejo.ellis.link/continuwuation/continuwuity) [](https://forgejo.ellis.link/continuwuation/continuwuity/stars) [](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
|
||||
|
||||
[](https://github.com/continuwuity/continuwuity) [](https://github.com/continuwuity/continuwuity/stargazers)
|
||||
|
||||
[](https://gitlab.com/continuwuity/continuwuity) [](https://gitlab.com/continuwuity/continuwuity/-/starrers)
|
||||
|
||||
[](https://codeberg.org/continuwuity/continuwuity) [](https://codeberg.org/continuwuity/continuwuity/stars)
|
||||
|
||||
## Why does this exist?
|
||||
|
||||
The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features.
|
||||
|
||||
We aim to provide a stable, well-maintained alternative for current conduwuit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver.
|
||||
|
||||
## Who are we?
|
||||
|
||||
We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous
|
||||
contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure.
|
||||
|
||||
We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity.
|
||||
|
||||
## What is Matrix?
|
||||
|
||||
[Matrix](https://matrix.org) is an open, federated, and extensible network for
|
||||
decentralized communication. Users from any Matrix homeserver can chat with users from all
|
||||
other homeservers over federation. Matrix is designed to be extensible and built on top of.
|
||||
You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord.
|
||||
|
||||
## What are the project's goals?
|
||||
|
||||
continuwuity aims to:
|
||||
|
||||
- Maintain a stable, reliable Matrix homeserver implementation in Rust
|
||||
- Improve compatibility and specification compliance with the Matrix protocol
|
||||
- Fix bugs and performance issues from the original conduwuit
|
||||
- Add missing features needed by homeserver administrators
|
||||
- Provide comprehensive documentation and easy deployment options
|
||||
- Create a sustainable development model for long-term maintenance
|
||||
- Keep a lightweight, efficient codebase that can run on modest hardware
|
||||
|
||||
## Can I try it out?
|
||||
|
||||
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||
|
||||
There are currently no open registration continuwuity instances available.
|
||||
|
||||
## What are we working on?
|
||||
|
||||
We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues).
|
||||
|
||||
- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747)
|
||||
- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0)
|
||||
- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119)
|
||||
- Automated testing
|
||||
- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748)
|
||||
- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750)
|
||||
|
||||
## Can I migrate my data from x?
|
||||
|
||||
- **Conduwuit**: Yes
|
||||
- **Conduit**: No, database is now incompatible
|
||||
- **Grapevine**: No, database is now incompatible
|
||||
- **Dendrite**: No
|
||||
- **Synapse**: No
|
||||
|
||||
We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this!
|
||||
|
||||
## How can I deploy my own?
|
||||
|
||||
- [Deployment options](deploying)
|
||||
|
||||
If you want to connect an appservice to continuwuity, take a look at the
|
||||
[appservices documentation](appservices).
|
||||
|
||||
## How can I contribute?
|
||||
|
||||
See the [contributor's guide](development/contributing)
|
||||
|
||||
## Contact
|
||||
|
||||
Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) and [space](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) to chat with us about the project!
|
||||
|
||||
[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity
|
||||
@@ -47,7 +47,7 @@ ## Database (RocksDB)
|
||||
### Compression
|
||||
|
||||
Some RocksDB settings can be adjusted such as the compression method chosen. See
|
||||
the RocksDB section in the [example config](configuration/examples.md).
|
||||
the RocksDB section in the [example config](./reference/config.mdx).
|
||||
|
||||
btrfs users have reported that database compression does not need to be disabled
|
||||
on Continuwuity as the filesystem already does not attempt to compress. This can be
|
||||
@@ -55,7 +55,7 @@ ### Compression
|
||||
the `physical_offset` matches (no filesystem compression). It is very important
|
||||
to ensure no additional filesystem compression takes place as this can render
|
||||
unbuffered Direct IO inoperable, significantly slowing down read and write
|
||||
performance. See <https://btrfs.readthedocs.io/en/latest/Compression.html#compatibility>
|
||||
performance. See [the Btrfs docs](https://btrfs.readthedocs.io/en/latest/Compression.html#compatibility).
|
||||
|
||||
> Compression is done using the COW mechanism so it’s incompatible with
|
||||
> nodatacow. Direct IO read works on compressed files but will fall back to
|
||||
17
docs/public/.well-known/continuwuity/announcements
Normal file
17
docs/public/.well-known/continuwuity/announcements
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"$schema": "https://continuwuity.org/schema/announcements.schema.json",
|
||||
"announcements": [
|
||||
{
|
||||
"id": 1,
|
||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"message": "It's a bird! It's a plane! No, it's 0.5.0-rc.8.1!\n\nThis is a minor bugfix update to the rc8 which backports some important fixes from the latest main branch. If you still haven't updated to rc8, you should skip to main. Otherwise, you should upgrade to this bugfix release as soon as possible.\n\nBugfixes backported to this version:\n\n- Resolved several issues with state resolution v2.1 (room version 12)\n- Fixed issues with the `restricted` and `knock_restricted` join rules that would sometimes incorrectly disallow a valid join\n- Fixed the automatic support contact listing being a no-op\n- Fixed upgrading pre-v12 rooms to v12 rooms\n- Fixed policy servers sending the incorrect JSON objects (resulted in false positives)\n- Fixed debug build panic during MSC4133 migration\n\nIt is recommended, if you can and are comfortable with doing so, following updates to the main branch - we're in the run up to the full 0.5.0 release, and more and more bugfixes and new features are being pushed constantly. Please don't forget to join [#announcements:continuwuity.org](https://matrix.to/#/#announcements:continuwuity.org) to receive this news faster and be alerted to other important updates!"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}}
|
||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||
43
docs/public/assets/logo.svg
Normal file
43
docs/public/assets/logo.svg
Normal file
@@ -0,0 +1,43 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
width="447.99823"
|
||||
height="447.99823"
|
||||
viewBox="0 0 447.99823 447.99823"
|
||||
version="1.1"
|
||||
id="svg1"
|
||||
xml:space="preserve"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"><defs
|
||||
id="defs1" /><g
|
||||
id="layer1"
|
||||
transform="translate(-32.000893,-32.000893)"><circle
|
||||
style="fill:#9b4bd4;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-dasharray:none;stroke-opacity:1"
|
||||
id="path1"
|
||||
cy="256"
|
||||
cx="256"
|
||||
r="176" /><path
|
||||
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 41,174 69,36 C 135,126 175,102 226,94 l -12,31 62,-44 -69,-44 15,30 C 128,69 84,109 41,172 Z"
|
||||
id="path7" /><path
|
||||
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 338,41 -36,69 c 84,25 108,65 116,116 l -31,-12 44,62 44,-69 -30,15 C 443,128 403,84 340,41 Z"
|
||||
id="path6" /><path
|
||||
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 471,338 -69,-36 c -25,84 -65,108 -116,116 l 12,-31 -62,44 69,44 -15,-30 c 94,-2 138,-42 181,-105 z"
|
||||
id="path8" /><path
|
||||
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 174,471 36,-69 C 126,377 102,337 94,286 l 31,12 -44,-62 -44,69 30,-15 c 2,94 42,138 105,181 z"
|
||||
id="path9" /><g
|
||||
id="g15"
|
||||
transform="translate(-5.4157688e-4)"><path
|
||||
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
|
||||
d="m 155.45977,224.65379 c -7.25909,13.49567 -7.25909,26.09161 -6.35171,39.58729 0.90737,11.69626 12.7034,24.29222 24.49943,26.09164 21.77727,3.59884 28.12898,-20.69338 28.12898,-20.69338 0,0 4.53693,-15.29508 5.4443,-40.48699"
|
||||
id="path11" /><path
|
||||
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
|
||||
d="m 218.96706,278.05399 c 3.00446,17.12023 7.52704,24.88918 19.22704,28.48918 9,2.7 22.5,-4.5 22.5,-16.2 0.9,21.6 17.1,17.1 19.8,17.1 11.7,-1.8 18.9,-14.4 16.2,-30.6"
|
||||
id="path12" /><path
|
||||
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
|
||||
d="m 305.6941,230.94317 c 1.8,27 6.3,40.5 6.3,40.5 8.1,27 28.8,19.8 28.8,19.8 18.9,-7.2 22.5,-24.3 22.5,-30.6 0,-25.2 -6.3,-35.1 -6.3,-35.1"
|
||||
id="path13" /></g></g></svg>
|
||||
|
After Width: | Height: | Size: 2.8 KiB |
17
docs/reference/_meta.json
Normal file
17
docs/reference/_meta.json
Normal file
@@ -0,0 +1,17 @@
|
||||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "config",
|
||||
"label": "Configuration"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "admin",
|
||||
"label": "Admin Commands"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "server",
|
||||
"label": "Server command"
|
||||
}
|
||||
]
|
||||
4
docs/reference/config.mdx
Normal file
4
docs/reference/config.mdx
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
```toml file="../../conduwuit-example.toml"
|
||||
|
||||
```
|
||||
@@ -1 +0,0 @@
|
||||
{{#include ../SECURITY.md}}
|
||||
1
docs/security.mdx
Symbolic link
1
docs/security.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../SECURITY.md
|
||||
13
docs/static/announcements.json
vendored
13
docs/static/announcements.json
vendored
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"$schema": "https://continuwuity.org/schema/announcements.schema.json",
|
||||
"announcements": [
|
||||
{
|
||||
"id": 1,
|
||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -128,7 +128,7 @@ ### Database corruption
|
||||
With this in mind:
|
||||
|
||||
- First start Continuwuity with the `PointInTime` recovery method. See the [example
|
||||
config](configuration/examples.md) for how to do this using
|
||||
config](./reference/config.mdx) for how to do this using
|
||||
`rocksdb_recovery_mode`
|
||||
- If your database successfully opens, clients are recommended to clear their
|
||||
client cache to account for the rollback
|
||||
@@ -8,7 +8,7 @@ ### Configuration
|
||||
|
||||
Create a configuration file called `coturn.conf` containing:
|
||||
|
||||
```conf
|
||||
```
|
||||
use-auth-secret
|
||||
static-auth-secret=<a secret key>
|
||||
realm=<your server domain>
|
||||
@@ -18,7 +18,7 @@ ### Configuration
|
||||
-s 64 1`.
|
||||
|
||||
These same values need to be set in Continuwuity. See the [example
|
||||
config](configuration/examples.md) in the TURN section for configuring these and
|
||||
config](./reference/config.mdx) in the TURN section for configuring these and
|
||||
restart Continuwuity after.
|
||||
|
||||
`turn_secret` or a path to `turn_secret_file` must have a value of your
|
||||
@@ -15,7 +15,7 @@
|
||||
file = inputs.self + "/rust-toolchain.toml";
|
||||
|
||||
# See also `rust-toolchain.toml`
|
||||
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
|
||||
sha256 = "sha256-SJwZ8g0zF2WrKDVmHrVG3pD2RGoQeo24MEXnNx5FyuI=";
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
@@ -97,6 +97,9 @@ rec {
|
||||
craneLib.buildPackage (
|
||||
(commonAttrs commonAttrsArgs)
|
||||
// {
|
||||
postFixup = ''
|
||||
patchelf --set-rpath "$(${pkgs.patchelf}/bin/patchelf --print-rpath $out/bin/${crateInfo.pname}):${rocksdb}/lib" $out/bin/${crateInfo.pname}
|
||||
'';
|
||||
cargoArtifacts = deps;
|
||||
doCheck = true;
|
||||
env = uwuenv.buildPackageEnv // rocksdbEnv;
|
||||
|
||||
5171
package-lock.json
generated
Normal file
5171
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
31
package.json
Normal file
31
package.json
Normal file
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"name": "continuwuation",
|
||||
"version": "1.0.0",
|
||||
"description": "<!-- ANCHOR: catchphrase -->",
|
||||
"main": "index.js",
|
||||
"directories": {
|
||||
"doc": "docs",
|
||||
"test": "tests"
|
||||
},
|
||||
"scripts": {
|
||||
"docs:dev": "rspress dev docs",
|
||||
"docs:build": "rspress build docs",
|
||||
"docs:preview": "rspress preview docs",
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "ssh://git@forgejo.ellis.link/continuwuation/continuwuity.git"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"type": "commonjs",
|
||||
"devDependencies": {
|
||||
"@rspress/core": "^2.0.0-rc.1",
|
||||
"@rspress/plugin-client-redirects": "^2.0.0-alpha.12",
|
||||
"@rspress/plugin-preview": "^2.0.0-beta.35",
|
||||
"@rspress/plugin-sitemap": "^2.0.0-beta.23",
|
||||
"typescript": "^5.9.3"
|
||||
}
|
||||
}
|
||||
@@ -63,7 +63,7 @@ Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
TimeoutStopSec=4m
|
||||
TimeoutStartSec=4m
|
||||
TimeoutStartSec=10m
|
||||
|
||||
StartLimitInterval=1m
|
||||
StartLimitBurst=5
|
||||
|
||||
@@ -22,7 +22,7 @@ # Update remote package lists
|
||||
```
|
||||
The `continuwuity` package conflicts with the old `conduwuit` package and will remove it automatically when installed.
|
||||
|
||||
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
|
||||
See the [generic deployment guide](/deploying/generic.mdx) for additional information about using the Debian package.
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -32,8 +32,8 @@ ### Configuration
|
||||
|
||||
### Running
|
||||
|
||||
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/bin/conduwuit`.
|
||||
The package uses the `conduwuit.service` systemd unit file to start and stop Continuwuity. The binary installs at `/usr/bin/conduwuit`.
|
||||
|
||||
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.
|
||||
|
||||
For information about setting up a reverse proxy and TLS, consult online documentation and guides. The [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) documents Caddy, which is the most user-friendly option for reverse proxy configuration.
|
||||
For information about setting up a reverse proxy and TLS, consult online documentation and guides. The [generic deployment guide](/deploying/generic.md#setting-up-the-reverse-proxy) documents Caddy, which is the most user-friendly option for reverse proxy configuration.
|
||||
|
||||
60
rspress.config.ts
Normal file
60
rspress.config.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import { defineConfig } from '@rspress/core';
|
||||
import { pluginPreview } from '@rspress/plugin-preview';
|
||||
import { pluginSitemap } from '@rspress/plugin-sitemap';
|
||||
import { pluginClientRedirects } from '@rspress/plugin-client-redirects';
|
||||
|
||||
export default defineConfig({
|
||||
root: 'docs',
|
||||
title: 'Continuwuity',
|
||||
description: 'A community-driven Matrix homeserver',
|
||||
icon: '/assets/logo.svg',
|
||||
logo: {
|
||||
light: '/assets/logo.svg',
|
||||
dark: '/assets/logo.svg',
|
||||
},
|
||||
themeConfig: {
|
||||
socialLinks: [
|
||||
{
|
||||
icon: {
|
||||
svg: `<svg role="img" viewBox="0 0 24 24" width="100%" xmlns="http://www.w3.org/2000/svg"><title>Matrix</title><path fill="currentColor" d="M.632.55v22.9H2.28V24H0V0h2.28v.55zm7.043 7.26v1.157h.033c.309-.443.683-.784 1.117-1.024.433-.245.936-.365 1.5-.365.54 0 1.033.107 1.481.314.448.208.785.582 1.02 1.108.254-.374.6-.706 1.034-.992.434-.287.95-.43 1.546-.43.453 0 .872.056 1.26.167.388.11.716.286.993.53.276.245.489.559.646.951.152.392.23.863.23 1.417v5.728h-2.349V11.52c0-.286-.01-.559-.032-.812a1.755 1.755 0 0 0-.18-.66 1.106 1.106 0 0 0-.438-.448c-.194-.11-.457-.166-.785-.166-.332 0-.6.064-.803.189a1.38 1.38 0 0 0-.48.499 1.946 1.946 0 0 0-.231.696 5.56 5.56 0 0 0-.06.785v4.768h-2.35v-4.8c0-.254-.004-.503-.018-.752a2.074 2.074 0 0 0-.143-.688 1.052 1.052 0 0 0-.415-.503c-.194-.125-.476-.19-.854-.19-.111 0-.259.024-.439.074-.18.051-.36.143-.53.282-.171.138-.319.337-.439.595-.12.259-.18.6-.18 1.02v4.966H5.46V7.81zm15.693 15.64V.55H21.72V0H24v24h-2.28v-.55z"/></svg>`
|
||||
},
|
||||
mode: 'link',
|
||||
content: 'https://matrix.to/#/#continuwuity:continuwuity.org',
|
||||
},
|
||||
{
|
||||
icon: {
|
||||
svg: `<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>Forgejo</title><path fill="currentColor" d="M16.7773 0c1.6018 0 2.9004 1.2986 2.9004 2.9005s-1.2986 2.9004-2.9004 2.9004c-1.0854 0-2.0315-.596-2.5288-1.4787H12.91c-2.3322 0-4.2272 1.8718-4.2649 4.195l-.0007 2.1175a7.0759 7.0759 0 0 1 4.148-1.4205l.1176-.001 1.3385.0002c.4973-.8827 1.4434-1.4788 2.5288-1.4788 1.6018 0 2.9004 1.2986 2.9004 2.9005s-1.2986 2.9004-2.9004 2.9004c-1.0854 0-2.0315-.596-2.5288-1.4787H12.91c-2.3322 0-4.2272 1.8718-4.2649 4.195l-.0007 2.319c.8827.4973 1.4788 1.4434 1.4788 2.5287 0 1.602-1.2986 2.9005-2.9005 2.9005-1.6018 0-2.9004-1.2986-2.9004-2.9005 0-1.0853.596-2.0314 1.4788-2.5287l-.0002-9.9831c0-3.887 3.1195-7.0453 6.9915-7.108l.1176-.001h1.3385C14.7458.5962 15.692 0 16.7773 0ZM7.2227 19.9052c-.6596 0-1.1943.5347-1.1943 1.1943s.5347 1.1943 1.1943 1.1943 1.1944-.5347 1.1944-1.1943-.5348-1.1943-1.1944-1.1943Zm9.5546-10.4644c-.6596 0-1.1944.5347-1.1944 1.1943s.5348 1.1943 1.1944 1.1943c.6596 0 1.1943-.5347 1.1943-1.1943s-.5347-1.1943-1.1943-1.1943Zm0-7.7346c-.6596 0-1.1944.5347-1.1944 1.1943s.5348 1.1943 1.1944 1.1943c.6596 0 1.1943-.5347 1.1943-1.1943s-.5347-1.1943-1.1943-1.1943Z"/></svg>`
|
||||
},
|
||||
mode: 'link',
|
||||
content: 'https://forgejo.ellis.link/continuwuation/continuwuity'
|
||||
},
|
||||
{
|
||||
icon: 'github',
|
||||
mode: 'link',
|
||||
content: 'https://github.com/continuwuity/continuwuity',
|
||||
},
|
||||
],
|
||||
lastUpdated: true,
|
||||
enableContentAnimation: true,
|
||||
enableAppearanceAnimation: false,
|
||||
footer: {
|
||||
},
|
||||
},
|
||||
|
||||
plugins: [pluginPreview(), pluginSitemap({
|
||||
siteUrl: 'https://continuwuity.org', // TODO: Set automatically in build pipeline
|
||||
}),
|
||||
pluginClientRedirects({
|
||||
redirects: [{
|
||||
from: '/configuration/examples',
|
||||
to: '/reference/config'
|
||||
}, {
|
||||
from: '/admin_reference',
|
||||
to: '/reference/admin'
|
||||
}, {
|
||||
from: '/server_reference',
|
||||
to: '/reference/server'
|
||||
}
|
||||
]
|
||||
})],
|
||||
});
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
[toolchain]
|
||||
profile = "minimal"
|
||||
channel = "1.89.0"
|
||||
channel = "1.90.0"
|
||||
components = [
|
||||
# For rust-analyzer
|
||||
"rust-src",
|
||||
|
||||
@@ -41,7 +41,7 @@ async fn changes_since(
|
||||
let results: Vec<_> = self
|
||||
.services
|
||||
.account_data
|
||||
.changes_since(room_id.as_deref(), &user_id, since, None)
|
||||
.changes_since(room_id.as_deref(), &user_id, Some(since), None)
|
||||
.collect()
|
||||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
use std::{collections::BTreeMap, fmt::Write as _};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashSet},
|
||||
fmt::Write as _,
|
||||
};
|
||||
|
||||
use api::client::{
|
||||
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
|
||||
@@ -12,7 +15,7 @@
|
||||
};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use ruma::{
|
||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
|
||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, UserId,
|
||||
events::{
|
||||
RoomAccountDataEventType, StateEventType,
|
||||
room::{
|
||||
@@ -950,23 +953,38 @@ pub(super) async fn force_leave_remote_room(
|
||||
&self,
|
||||
user_id: String,
|
||||
room_id: OwnedRoomOrAliasId,
|
||||
via: Option<String>,
|
||||
) -> Result {
|
||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||
let (room_id, _) = self
|
||||
let (room_id, vias_raw) = self
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_with_servers(&room_id, None)
|
||||
.resolve_with_servers(
|
||||
&room_id,
|
||||
if let Some(v) = via.clone() {
|
||||
Some(vec![OwnedServerName::parse(v)?])
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
assert!(
|
||||
self.services.globals.user_is_local(&user_id),
|
||||
"Parsed user_id must be a local user"
|
||||
);
|
||||
remote_leave_room(self.services, &user_id, &room_id, None)
|
||||
let mut vias: HashSet<OwnedServerName> = HashSet::new();
|
||||
if let Some(via) = via {
|
||||
vias.insert(OwnedServerName::parse(via)?);
|
||||
}
|
||||
for server in vias_raw {
|
||||
vias.insert(server);
|
||||
}
|
||||
remote_leave_room(self.services, &user_id, &room_id, None, vias)
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
|
||||
self.write_str(&format!("{user_id} successfully left {room_id} via remote server."))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -107,6 +107,7 @@ pub enum UserCommand {
|
||||
ForceLeaveRemoteRoom {
|
||||
user_id: String,
|
||||
room_id: OwnedRoomOrAliasId,
|
||||
via: Option<String>,
|
||||
},
|
||||
|
||||
/// - Forces the specified user to drop their power levels to the room
|
||||
|
||||
@@ -389,7 +389,7 @@ pub(crate) async fn get_key_changes_route(
|
||||
device_list_updates.extend(
|
||||
services
|
||||
.users
|
||||
.keys_changed(sender_user, from, Some(to))
|
||||
.keys_changed(sender_user, Some(from), Some(to))
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
@@ -401,7 +401,7 @@ pub(crate) async fn get_key_changes_route(
|
||||
device_list_updates.extend(
|
||||
services
|
||||
.users
|
||||
.room_keys_changed(room_id, from, Some(to))
|
||||
.room_keys_changed(room_id, Some(from), Some(to))
|
||||
.map(|(user_id, _)| user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
|
||||
@@ -44,6 +44,7 @@
|
||||
rooms::{
|
||||
state::RoomMutexGuard,
|
||||
state_compressor::{CompressedState, HashSetCompressStateEvent},
|
||||
timeline::pdu_fits,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -573,6 +574,13 @@ async fn join_room_by_id_helper_remote(
|
||||
return state;
|
||||
},
|
||||
};
|
||||
if !pdu_fits(&mut value.clone()) {
|
||||
warn!(
|
||||
"dropping incoming PDU {event_id} in room {room_id} from room join because \
|
||||
it exceeds 65535 bytes or is otherwise too large."
|
||||
);
|
||||
return state;
|
||||
}
|
||||
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
|
||||
if let Some(state_key) = &pdu.state_key {
|
||||
let shortstatekey = services
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
use conduwuit::{
|
||||
Err, Result, debug, debug_info, debug_warn, err, info,
|
||||
matrix::{
|
||||
event::{Event, gen_event_id},
|
||||
event::gen_event_id,
|
||||
pdu::{PduBuilder, PduEvent},
|
||||
},
|
||||
result::FlatOk,
|
||||
@@ -458,7 +458,7 @@ async fn knock_room_helper_local(
|
||||
.await,
|
||||
};
|
||||
|
||||
let send_knock_response = services
|
||||
services
|
||||
.sending
|
||||
.send_federation_request(&remote_server, send_knock_request)
|
||||
.await?;
|
||||
@@ -477,20 +477,14 @@ async fn knock_room_helper_local(
|
||||
.map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?;
|
||||
|
||||
info!("Updating membership locally to knock state with provided stripped state events");
|
||||
// TODO: this call does not appear to do anything because `update_membership`
|
||||
// doesn't call `mark_as_knock`. investigate further, ideally with the aim of
|
||||
// removing this call entirely -- Ginger thinks `update_membership` should only
|
||||
// be called from `force_state` and `append_pdu`.
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_membership(
|
||||
room_id,
|
||||
sender_user,
|
||||
parsed_knock_pdu
|
||||
.get_content::<RoomMemberEventContent>()
|
||||
.expect("we just created this"),
|
||||
sender_user,
|
||||
Some(send_knock_response.knock_room_state),
|
||||
None,
|
||||
false,
|
||||
)
|
||||
.update_membership(room_id, sender_user, &parsed_knock_pdu, false)
|
||||
.await?;
|
||||
|
||||
info!("Appending room knock event locally");
|
||||
@@ -677,20 +671,11 @@ async fn knock_room_helper_remote(
|
||||
.await?;
|
||||
|
||||
info!("Updating membership locally to knock state with provided stripped state events");
|
||||
// TODO: see TODO on the other call to `update_membership`
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_membership(
|
||||
room_id,
|
||||
sender_user,
|
||||
parsed_knock_pdu
|
||||
.get_content::<RoomMemberEventContent>()
|
||||
.expect("we just created this"),
|
||||
sender_user,
|
||||
Some(send_knock_response.knock_room_state),
|
||||
None,
|
||||
false,
|
||||
)
|
||||
.update_membership(room_id, sender_user, &parsed_knock_pdu, false)
|
||||
.await?;
|
||||
|
||||
info!("Appending room knock event locally");
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Result, debug_info, debug_warn, err,
|
||||
Err, Pdu, Result, debug_info, debug_warn, err,
|
||||
matrix::{event::gen_event_id, pdu::PduBuilder},
|
||||
utils::{self, FutureBoolExt, future::ReadyEqExt},
|
||||
warn,
|
||||
};
|
||||
use futures::{FutureExt, StreamExt, TryFutureExt, pin_mut};
|
||||
use futures::{FutureExt, StreamExt, pin_mut};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId,
|
||||
api::{
|
||||
@@ -81,42 +81,9 @@ pub async fn leave_room(
|
||||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
) -> Result {
|
||||
let default_member_content = RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason: reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
is_direct: None,
|
||||
avatar_url: None,
|
||||
displayname: None,
|
||||
third_party_invite: None,
|
||||
blurhash: None,
|
||||
redact_events: None,
|
||||
};
|
||||
|
||||
let is_banned = services.rooms.metadata.is_banned(room_id);
|
||||
let is_disabled = services.rooms.metadata.is_disabled(room_id);
|
||||
|
||||
pin_mut!(is_banned, is_disabled);
|
||||
if is_banned.or(is_disabled).await {
|
||||
// the room is banned/disabled, the room must be rejected locally since we
|
||||
// cant/dont want to federate with this server
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_membership(
|
||||
room_id,
|
||||
user_id,
|
||||
default_member_content,
|
||||
user_id,
|
||||
None,
|
||||
None,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let dont_have_room = services
|
||||
.rooms
|
||||
.state_cache
|
||||
@@ -129,43 +96,41 @@ pub async fn leave_room(
|
||||
.is_knocked(user_id, room_id)
|
||||
.eq(&false);
|
||||
|
||||
// Ask a remote server if we don't have this room and are not knocking on it
|
||||
if dont_have_room.and(not_knocked).await {
|
||||
if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone())
|
||||
.boxed()
|
||||
.await
|
||||
{
|
||||
warn!(%user_id, "Failed to leave room {room_id} remotely: {e}");
|
||||
// Don't tell the client about this error
|
||||
}
|
||||
pin_mut!(is_banned, is_disabled);
|
||||
|
||||
let last_state = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.invite_state(user_id, room_id)
|
||||
.or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id))
|
||||
.or_else(|_| services.rooms.state_cache.left_state(user_id, room_id))
|
||||
.await
|
||||
.ok();
|
||||
/*
|
||||
there are three possible cases when leaving a room:
|
||||
1. the room is banned or disabled, so we're not federating with it.
|
||||
2. nobody on the homeserver is in the room, which can happen if the user is rejecting an invite
|
||||
to a room that we don't have any members in.
|
||||
3. someone else on the homeserver is in the room. in this case we can leave like normal by sending a PDU over federation.
|
||||
|
||||
// We always drop the invite, we can't rely on other servers
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_membership(
|
||||
room_id,
|
||||
user_id,
|
||||
default_member_content,
|
||||
user_id,
|
||||
last_state,
|
||||
None,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
in cases 1 and 2, we have to update the state cache using `mark_as_left` directly.
|
||||
otherwise `build_and_append_pdu` will take care of updating the state cache for us.
|
||||
*/
|
||||
|
||||
// `leave_pdu` is the outlier `m.room.member` event which will be synced to the
|
||||
// user. if it's None the sync handler will create a dummy PDU.
|
||||
let leave_pdu = if is_banned.or(is_disabled).await {
|
||||
// case 1: the room is banned/disabled. we don't want to federate with another
|
||||
// server to leave, so we can't create an outlier PDU.
|
||||
None
|
||||
} else if dont_have_room.and(not_knocked).await {
|
||||
// case 2: ask a remote server to assist us with leaving
|
||||
// we always mark the room as left locally, regardless of if the federated leave
|
||||
// failed
|
||||
|
||||
remote_leave_room(services, user_id, room_id, reason.clone(), HashSet::new())
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
warn!(%user_id, "Failed to leave room {room_id} remotely: {err}");
|
||||
})
|
||||
.ok()
|
||||
} else {
|
||||
// case 3: we can leave by sending a PDU.
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
||||
let Ok(event) = services
|
||||
let user_member_event_content = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content::<RoomMemberEventContent>(
|
||||
@@ -173,64 +138,84 @@ pub async fn leave_room(
|
||||
&StateEventType::RoomMember,
|
||||
user_id.as_str(),
|
||||
)
|
||||
.await
|
||||
else {
|
||||
debug_warn!(
|
||||
"Trying to leave a room you are not a member of, marking room as left locally."
|
||||
);
|
||||
.await;
|
||||
|
||||
return services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_membership(
|
||||
room_id,
|
||||
user_id,
|
||||
default_member_content,
|
||||
user_id,
|
||||
None,
|
||||
None,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
};
|
||||
match user_member_event_content {
|
||||
| Ok(content) => {
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
is_direct: None,
|
||||
..content
|
||||
}),
|
||||
user_id,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
||||
membership: MembershipState::Leave,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
is_direct: None,
|
||||
..event
|
||||
}),
|
||||
user_id,
|
||||
Some(room_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
// `build_and_append_pdu` calls `mark_as_left` internally, so we return early.
|
||||
return Ok(());
|
||||
},
|
||||
| Err(_) => {
|
||||
// an exception to case 3 is if the user isn't even in the room they're trying
|
||||
// to leave. this can happen if the client's caching is wrong.
|
||||
debug_warn!(
|
||||
"Trying to leave a room you are not a member of, marking room as left \
|
||||
locally."
|
||||
);
|
||||
|
||||
// return the existing leave state, if one exists. `mark_as_left` will then
|
||||
// update the `roomuserid_leftcount` table, making the leave come down sync
|
||||
// again.
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.left_state(user_id, room_id)
|
||||
.await?
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.mark_as_left(user_id, room_id, leave_pdu)
|
||||
.await;
|
||||
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.update_joined_count(room_id)
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remote_leave_room(
|
||||
pub async fn remote_leave_room<S: ::std::hash::BuildHasher>(
|
||||
services: &Services,
|
||||
user_id: &UserId,
|
||||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
) -> Result<()> {
|
||||
mut servers: HashSet<OwnedServerName, S>,
|
||||
) -> Result<Pdu> {
|
||||
let mut make_leave_response_and_server =
|
||||
Err!(BadServerResponse("No remote server available to assist in leaving {room_id}."));
|
||||
|
||||
let mut servers: HashSet<OwnedServerName> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
servers.extend(
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<HashSet<OwnedServerName>>()
|
||||
.await,
|
||||
);
|
||||
|
||||
match services
|
||||
.rooms
|
||||
@@ -277,6 +262,11 @@ pub async fn remote_leave_room(
|
||||
if let Some(room_id_server_name) = room_id.server_name() {
|
||||
servers.insert(room_id_server_name.to_owned());
|
||||
}
|
||||
if servers.is_empty() {
|
||||
return Err!(BadServerResponse(warn!(
|
||||
"No remote servers found to assist in leaving {room_id}."
|
||||
)));
|
||||
}
|
||||
|
||||
debug_info!("servers in remote_leave_room: {servers:?}");
|
||||
|
||||
@@ -284,7 +274,7 @@ pub async fn remote_leave_room(
|
||||
let make_leave_response = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
&remote_server,
|
||||
remote_server.as_ref(),
|
||||
federation::membership::prepare_leave_event::v1::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
user_id: user_id.to_owned(),
|
||||
@@ -292,11 +282,21 @@ pub async fn remote_leave_room(
|
||||
)
|
||||
.await;
|
||||
|
||||
make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server));
|
||||
let error = make_leave_response.as_ref().err().map(ToString::to_string);
|
||||
make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server.clone()));
|
||||
|
||||
if make_leave_response_and_server.is_ok() {
|
||||
debug_info!(
|
||||
"Received make_leave_response from {} for leaving {room_id}",
|
||||
remote_server
|
||||
);
|
||||
break;
|
||||
}
|
||||
debug_warn!(
|
||||
"Failed to get make_leave_response from {} for leaving {room_id}: {}",
|
||||
remote_server,
|
||||
error.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
let (make_leave_response, remote_server) = make_leave_response_and_server?;
|
||||
@@ -304,13 +304,14 @@ pub async fn remote_leave_room(
|
||||
let Some(room_version_id) = make_leave_response.room_version else {
|
||||
return Err!(BadServerResponse(warn!(
|
||||
"No room version was returned by {remote_server} for {room_id}, room version is \
|
||||
likely not supported by conduwuit"
|
||||
likely not supported by continuwuity"
|
||||
)));
|
||||
};
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
return Err!(BadServerResponse(warn!(
|
||||
"Remote room version {room_version_id} for {room_id} is not supported by conduwuit",
|
||||
"Remote room version {room_version_id} for {room_id} is not supported by \
|
||||
continuwuity",
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -373,7 +374,7 @@ pub async fn remote_leave_room(
|
||||
&remote_server,
|
||||
federation::membership::create_leave_event::v2::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id,
|
||||
event_id: event_id.clone(),
|
||||
pdu: services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(leave_event.clone())
|
||||
@@ -382,5 +383,14 @@ pub async fn remote_leave_room(
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
services
|
||||
.rooms
|
||||
.outlier
|
||||
.add_pdu_outlier(&event_id, &leave_event);
|
||||
|
||||
let leave_pdu = Pdu::from_id_val(&event_id, leave_event).map_err(|e| {
|
||||
err!(BadServerResponse("Invalid leave PDU received during federated leave: {e:?}"))
|
||||
})?;
|
||||
|
||||
Ok(leave_pdu)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, at,
|
||||
matrix::{
|
||||
@@ -16,7 +17,7 @@
|
||||
Services,
|
||||
rooms::{
|
||||
lazy_loading,
|
||||
lazy_loading::{Options, Witness},
|
||||
lazy_loading::{MemberSet, Options},
|
||||
timeline::PdusIterItem,
|
||||
},
|
||||
};
|
||||
@@ -70,6 +71,7 @@
|
||||
/// where the user was joined, depending on `history_visibility`)
|
||||
pub(crate) async fn get_message_events_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<get_message_events::v3::Request>,
|
||||
) -> Result<get_message_events::v3::Response> {
|
||||
debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
|
||||
@@ -78,6 +80,11 @@ pub(crate) async fn get_message_events_route(
|
||||
let room_id = &body.room_id;
|
||||
let filter = &body.filter;
|
||||
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, sender_device, client_ip)
|
||||
.await;
|
||||
|
||||
if !services.rooms.metadata.exists(room_id).await {
|
||||
return Err!(Request(Forbidden("Room does not exist to this server")));
|
||||
}
|
||||
@@ -162,7 +169,7 @@ pub(crate) async fn get_message_events_route(
|
||||
|
||||
let state = witness
|
||||
.map(Option::into_iter)
|
||||
.map(|option| option.flat_map(Witness::into_iter))
|
||||
.map(|option| option.flat_map(MemberSet::into_iter))
|
||||
.map(IterStream::stream)
|
||||
.into_stream()
|
||||
.flatten()
|
||||
@@ -192,7 +199,7 @@ pub(crate) async fn lazy_loading_witness<'a, I>(
|
||||
services: &Services,
|
||||
lazy_loading_context: &lazy_loading::Context<'_>,
|
||||
events: I,
|
||||
) -> Witness
|
||||
) -> MemberSet
|
||||
where
|
||||
I: Iterator<Item = &'a PdusIterItem> + Clone + Send,
|
||||
{
|
||||
@@ -213,10 +220,10 @@ pub(crate) async fn lazy_loading_witness<'a, I>(
|
||||
let receipts = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.readreceipts_since(lazy_loading_context.room_id, oldest.into_unsigned());
|
||||
.readreceipts_since(lazy_loading_context.room_id, Some(oldest.into_unsigned()));
|
||||
|
||||
pin_mut!(receipts);
|
||||
let witness: Witness = events
|
||||
let witness: MemberSet = events
|
||||
.stream()
|
||||
.map(ref_at!(1))
|
||||
.map(Event::sender)
|
||||
@@ -224,7 +231,7 @@ pub(crate) async fn lazy_loading_witness<'a, I>(
|
||||
.chain(
|
||||
receipts
|
||||
.ready_take_while(|(_, c, _)| *c <= newest.into_unsigned())
|
||||
.map(|(user_id, ..)| user_id.to_owned()),
|
||||
.map(|(user_id, ..)| user_id),
|
||||
)
|
||||
.collect()
|
||||
.await;
|
||||
@@ -232,7 +239,7 @@ pub(crate) async fn lazy_loading_witness<'a, I>(
|
||||
services
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.witness_retain(witness, lazy_loading_context)
|
||||
.retain_lazy_members(witness, lazy_loading_context)
|
||||
.await
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, PduCount, Result, err};
|
||||
use ruma::{
|
||||
MilliSecondsSinceUnixEpoch,
|
||||
@@ -118,9 +119,14 @@ pub(crate) async fn set_read_marker_route(
|
||||
/// Sets private read marker and public read receipt EDU.
|
||||
pub(crate) async fn create_receipt_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<create_receipt::v3::Request>,
|
||||
) -> Result<create_receipt::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, body.sender_device.as_deref(), client_ip)
|
||||
.await;
|
||||
|
||||
if matches!(
|
||||
&body.receipt_type,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use ruma::{
|
||||
api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
|
||||
@@ -13,9 +14,14 @@
|
||||
/// - TODO: Handle txn id
|
||||
pub(crate) async fn redact_event_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<redact_event::v3::Request>,
|
||||
) -> Result<redact_event::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, body.sender_device.as_deref(), client_ip)
|
||||
.await;
|
||||
let body = &body.body;
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
// TODO: Users can redact their own messages while suspended
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils};
|
||||
use ruma::{api::client::message::send_message_event, events::MessageLikeEventType};
|
||||
use serde_json::from_str;
|
||||
@@ -18,6 +19,7 @@
|
||||
/// allowed
|
||||
pub(crate) async fn send_message_event_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<send_message_event::v3::Request>,
|
||||
) -> Result<send_message_event::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
@@ -27,6 +29,11 @@ pub(crate) async fn send_message_event_route(
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
}
|
||||
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, body.sender_device.as_deref(), client_ip)
|
||||
.await;
|
||||
|
||||
// Forbid m.room.encrypted if encryption is disabled
|
||||
if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption
|
||||
{
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, err,
|
||||
matrix::{Event, pdu::PduBuilder},
|
||||
@@ -7,7 +8,7 @@
|
||||
use conduwuit_service::Services;
|
||||
use futures::{FutureExt, TryStreamExt};
|
||||
use ruma::{
|
||||
OwnedEventId, RoomId, UserId,
|
||||
MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
|
||||
api::client::state::{get_state_events, get_state_events_for_key, send_state_event},
|
||||
events::{
|
||||
AnyStateEventContent, StateEventType,
|
||||
@@ -30,9 +31,14 @@
|
||||
/// Sends a state event into the room.
|
||||
pub(crate) async fn send_state_event_for_key_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
body: Ruma<send_state_event::v3::Request>,
|
||||
) -> Result<send_state_event::v3::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, body.sender_device.as_deref(), ip)
|
||||
.await;
|
||||
|
||||
if services.users.is_suspended(sender_user).await? {
|
||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||
@@ -61,9 +67,10 @@ pub(crate) async fn send_state_event_for_key_route(
|
||||
/// Sends a state event into the room.
|
||||
pub(crate) async fn send_state_event_for_empty_key_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
body: Ruma<send_state_event::v3::Request>,
|
||||
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
||||
send_state_event_for_key_route(State(services), body)
|
||||
send_state_event_for_key_route(State(services), InsecureClientIp(ip), body)
|
||||
.boxed()
|
||||
.await
|
||||
.map(RumaResponse)
|
||||
@@ -185,7 +192,7 @@ async fn send_state_event_for_key_helper(
|
||||
event_type: &StateEventType,
|
||||
json: &Raw<AnyStateEventContent>,
|
||||
state_key: &str,
|
||||
timestamp: Option<ruma::MilliSecondsSinceUnixEpoch>,
|
||||
timestamp: Option<MilliSecondsSinceUnixEpoch>,
|
||||
) -> Result<OwnedEventId> {
|
||||
allowed_to_send_state_event(services, room_id, event_type, state_key, json).await?;
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
||||
@@ -1,65 +1,123 @@
|
||||
mod v3;
|
||||
mod v4;
|
||||
mod v5;
|
||||
|
||||
use std::collections::VecDeque;
|
||||
|
||||
use conduwuit::{
|
||||
Error, PduCount, Result,
|
||||
Event, PduCount, Result, err,
|
||||
matrix::pdu::PduEvent,
|
||||
ref_at, trace,
|
||||
utils::stream::{BroadbandExt, ReadyExt, TryIgnore},
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{StreamExt, pin_mut};
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
RoomId, UserId,
|
||||
OwnedUserId, RoomId, UserId,
|
||||
events::TimelineEventType::{
|
||||
self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker,
|
||||
},
|
||||
};
|
||||
|
||||
pub(crate) use self::{
|
||||
v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route,
|
||||
};
|
||||
pub(crate) use self::{v3::sync_events_route, v5::sync_events_v5_route};
|
||||
|
||||
pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] =
|
||||
&[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker];
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct TimelinePdus {
|
||||
pub pdus: VecDeque<(PduCount, PduEvent)>,
|
||||
pub limited: bool,
|
||||
}
|
||||
|
||||
impl TimelinePdus {
|
||||
fn senders(&self) -> impl Iterator<Item = OwnedUserId> {
|
||||
self.pdus
|
||||
.iter()
|
||||
.map(ref_at!(1))
|
||||
.map(Event::sender)
|
||||
.map(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
/// Load up to `limit` PDUs in the range (starting_count, ending_count].
|
||||
async fn load_timeline(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
room_id: &RoomId,
|
||||
roomsincecount: PduCount,
|
||||
next_batch: Option<PduCount>,
|
||||
starting_count: Option<PduCount>,
|
||||
ending_count: Option<PduCount>,
|
||||
limit: usize,
|
||||
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
|
||||
let last_timeline_count = services
|
||||
.rooms
|
||||
.timeline
|
||||
.last_timeline_count(Some(sender_user), room_id)
|
||||
.await?;
|
||||
) -> Result<TimelinePdus> {
|
||||
let mut pdu_stream = match starting_count {
|
||||
| Some(starting_count) => {
|
||||
let last_timeline_count = services
|
||||
.rooms
|
||||
.timeline
|
||||
.last_timeline_count(Some(sender_user), room_id)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
err!(Database(warn!("Failed to fetch end of room timeline: {}", err)))
|
||||
})?;
|
||||
|
||||
if last_timeline_count <= roomsincecount {
|
||||
return Ok((Vec::new(), false));
|
||||
}
|
||||
if last_timeline_count <= starting_count {
|
||||
// no messages have been sent in this room since `starting_count`
|
||||
return Ok(TimelinePdus::default());
|
||||
}
|
||||
|
||||
let non_timeline_pdus = services
|
||||
.rooms
|
||||
.timeline
|
||||
.pdus_rev(Some(sender_user), room_id, None)
|
||||
.ignore_err()
|
||||
.ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max))
|
||||
.ready_take_while(|&(pducount, _)| pducount > roomsincecount);
|
||||
// for incremental sync, stream from the DB all PDUs which were sent after
|
||||
// `starting_count` but before `ending_count`, including `ending_count` but
|
||||
// not `starting_count`. this code is pretty similar to the initial sync
|
||||
// branch, they're separate to allow for future optimization
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.pdus_rev(
|
||||
Some(sender_user),
|
||||
room_id,
|
||||
ending_count.map(|count| count.saturating_add(1)),
|
||||
)
|
||||
.ignore_err()
|
||||
.ready_take_while(move |&(pducount, _)| pducount > starting_count)
|
||||
.boxed()
|
||||
},
|
||||
| None => {
|
||||
// For initial sync, stream from the DB all PDUs before and including
|
||||
// `ending_count` in reverse order
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.pdus_rev(
|
||||
Some(sender_user),
|
||||
room_id,
|
||||
ending_count.map(|count| count.saturating_add(1)),
|
||||
)
|
||||
.ignore_err()
|
||||
.boxed()
|
||||
},
|
||||
};
|
||||
|
||||
// Take the last events for the timeline
|
||||
pin_mut!(non_timeline_pdus);
|
||||
let timeline_pdus: Vec<_> = non_timeline_pdus.by_ref().take(limit).collect().await;
|
||||
// Return at most `limit` PDUs from the stream
|
||||
let pdus = pdu_stream
|
||||
.by_ref()
|
||||
.take(limit)
|
||||
.ready_fold(VecDeque::with_capacity(limit), |mut pdus, item| {
|
||||
pdus.push_front(item);
|
||||
pdus
|
||||
})
|
||||
.await;
|
||||
|
||||
let timeline_pdus: Vec<_> = timeline_pdus.into_iter().rev().collect();
|
||||
// The timeline is limited if there are still more PDUs in the stream
|
||||
let limited = pdu_stream.next().await.is_some();
|
||||
|
||||
// They /sync response doesn't always return all messages, so we say the output
|
||||
// is limited unless there are events in non_timeline_pdus
|
||||
let limited = non_timeline_pdus.next().await.is_some();
|
||||
trace!(
|
||||
"syncing {:?} timeline pdus from {:?} to {:?} (limited = {:?})",
|
||||
pdus.len(),
|
||||
starting_count,
|
||||
ending_count,
|
||||
limited,
|
||||
);
|
||||
|
||||
Ok((timeline_pdus, limited))
|
||||
Ok(TimelinePdus { pdus, limited })
|
||||
}
|
||||
|
||||
async fn share_encrypted_room(
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
852
src/api/client/sync/v3/joined.rs
Normal file
852
src/api/client/sync/v3/joined.rs
Normal file
@@ -0,0 +1,852 @@
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
|
||||
use conduwuit::{
|
||||
Result, at, debug_warn, err, extract_variant,
|
||||
matrix::{
|
||||
Event,
|
||||
pdu::{PduCount, PduEvent},
|
||||
},
|
||||
trace,
|
||||
utils::{
|
||||
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
math::ruma_from_u64,
|
||||
stream::{TryIgnore, WidebandExt},
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{
|
||||
FutureExt, StreamExt, TryFutureExt,
|
||||
future::{OptionFuture, join, join3, join4, try_join, try_join3},
|
||||
};
|
||||
use ruma::{
|
||||
OwnedRoomId, OwnedUserId, RoomId, UserId,
|
||||
api::client::sync::sync_events::{
|
||||
UnreadNotificationsCount,
|
||||
v3::{Ephemeral, JoinedRoom, RoomAccountData, RoomSummary, State as RoomState, Timeline},
|
||||
},
|
||||
events::{
|
||||
AnyRawAccountDataEvent, StateEventType,
|
||||
TimelineEventType::*,
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
serde::Raw,
|
||||
uint,
|
||||
};
|
||||
use service::rooms::short::ShortStateHash;
|
||||
|
||||
use super::{load_timeline, share_encrypted_room};
|
||||
use crate::client::{
|
||||
TimelinePdus, ignored_filter,
|
||||
sync::v3::{
|
||||
DEFAULT_TIMELINE_LIMIT, DeviceListUpdates, SyncContext, prepare_lazily_loaded_members,
|
||||
state::{build_state_incremental, build_state_initial},
|
||||
},
|
||||
};
|
||||
|
||||
/// Generate the sync response for a room the user is joined to.
|
||||
#[tracing::instrument(
|
||||
name = "joined",
|
||||
level = "debug",
|
||||
skip_all,
|
||||
fields(
|
||||
room_id = ?room_id,
|
||||
syncing_user = ?sync_context.syncing_user,
|
||||
),
|
||||
)]
|
||||
pub(super) async fn load_joined_room(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
ref room_id: OwnedRoomId,
|
||||
) -> Result<(JoinedRoom, DeviceListUpdates)> {
|
||||
/*
|
||||
Building a sync response involves many steps which all depend on each other.
|
||||
To parallelize the process as much as possible, each step is divided into its own function,
|
||||
and `join*` functions are used to perform steps in parallel which do not depend on each other.
|
||||
*/
|
||||
|
||||
let (
|
||||
account_data,
|
||||
ephemeral,
|
||||
StateAndTimeline {
|
||||
state_events,
|
||||
timeline,
|
||||
summary,
|
||||
notification_counts,
|
||||
device_list_updates,
|
||||
},
|
||||
) = try_join3(
|
||||
build_account_data(services, sync_context, room_id),
|
||||
build_ephemeral(services, sync_context, room_id),
|
||||
build_state_and_timeline(services, sync_context, room_id),
|
||||
)
|
||||
.boxed()
|
||||
.await?;
|
||||
|
||||
if !timeline.is_empty() || !state_events.is_empty() {
|
||||
trace!(
|
||||
"syncing {} timeline events (limited = {}) and {} state events",
|
||||
timeline.events.len(),
|
||||
timeline.limited,
|
||||
state_events.len()
|
||||
);
|
||||
}
|
||||
|
||||
let joined_room = JoinedRoom {
|
||||
account_data,
|
||||
summary: summary.unwrap_or_default(),
|
||||
unread_notifications: notification_counts.unwrap_or_default(),
|
||||
timeline,
|
||||
state: RoomState {
|
||||
events: state_events.into_iter().map(Event::into_format).collect(),
|
||||
},
|
||||
ephemeral,
|
||||
unread_thread_notifications: BTreeMap::new(),
|
||||
};
|
||||
|
||||
Ok((joined_room, device_list_updates))
|
||||
}
|
||||
|
||||
/// Collect changes to the syncing user's account data events.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_account_data(
|
||||
services: &Services,
|
||||
SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
current_count,
|
||||
..
|
||||
}: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> Result<RoomAccountData> {
|
||||
let account_data_changes = services
|
||||
.account_data
|
||||
.changes_since(Some(room_id), syncing_user, last_sync_end_count, Some(current_count))
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(RoomAccountData { events: account_data_changes })
|
||||
}
|
||||
|
||||
/// Collect new ephemeral events.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_ephemeral(
|
||||
services: &Services,
|
||||
SyncContext { syncing_user, last_sync_end_count, .. }: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> Result<Ephemeral> {
|
||||
// note: some of the futures below are boxed. this is because, without the box,
|
||||
// rustc produces over thirty inscrutable errors in `mod.rs` at the call-site
|
||||
// of `load_joined_room`. I don't know why boxing them fixes this -- it seems
|
||||
// to be related to the async closures and borrowing from the sync context.
|
||||
|
||||
// collect updates to read receipts
|
||||
let receipt_events = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.readreceipts_since(room_id, last_sync_end_count)
|
||||
.filter_map(async |(read_user, _, edu)| {
|
||||
let is_ignored = services
|
||||
.users
|
||||
.user_is_ignored(&read_user, syncing_user)
|
||||
.await;
|
||||
|
||||
// filter out read receipts for ignored users
|
||||
is_ignored.or_some(edu)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.boxed();
|
||||
|
||||
// collect the updated list of typing users, if it's changed
|
||||
let typing_event = async {
|
||||
let should_send_typing_event = match last_sync_end_count {
|
||||
| Some(last_sync_end_count) => {
|
||||
match services.rooms.typing.last_typing_update(room_id).await {
|
||||
| Ok(last_typing_update) => {
|
||||
// update the typing list if the users typing have changed since the last
|
||||
// sync
|
||||
last_typing_update > last_sync_end_count
|
||||
},
|
||||
| Err(err) => {
|
||||
warn!("Error checking last typing update: {}", err);
|
||||
return None;
|
||||
},
|
||||
}
|
||||
},
|
||||
// always update the typing list on an initial sync
|
||||
| None => true,
|
||||
};
|
||||
|
||||
if should_send_typing_event {
|
||||
let event = services
|
||||
.rooms
|
||||
.typing
|
||||
.typings_event_for_user(room_id, syncing_user)
|
||||
.await;
|
||||
|
||||
if let Ok(event) = event {
|
||||
return Some(
|
||||
Raw::new(&event)
|
||||
.expect("typing event should be valid")
|
||||
.cast(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
};
|
||||
|
||||
// collect the syncing user's private-read marker, if it's changed
|
||||
let private_read_event = async {
|
||||
let should_send_private_read = match last_sync_end_count {
|
||||
| Some(last_sync_end_count) => {
|
||||
let last_privateread_update = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.last_privateread_update(syncing_user, room_id)
|
||||
.await;
|
||||
|
||||
// update the marker if it's changed since the last sync
|
||||
last_privateread_update > last_sync_end_count
|
||||
},
|
||||
// always update the marker on an initial sync
|
||||
| None => true,
|
||||
};
|
||||
|
||||
if should_send_private_read {
|
||||
services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.private_read_get(room_id, syncing_user)
|
||||
.await
|
||||
.ok()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
let (receipt_events, typing_event, private_read_event) =
|
||||
join3(receipt_events, typing_event, private_read_event).await;
|
||||
|
||||
let mut edus = receipt_events;
|
||||
edus.extend(typing_event);
|
||||
edus.extend(private_read_event);
|
||||
|
||||
Ok(Ephemeral { events: edus })
|
||||
}
|
||||
|
||||
/// A struct to hold the state events, timeline, and other data which is
|
||||
/// computed from them.
|
||||
struct StateAndTimeline {
|
||||
state_events: Vec<PduEvent>,
|
||||
timeline: Timeline,
|
||||
summary: Option<RoomSummary>,
|
||||
notification_counts: Option<UnreadNotificationsCount>,
|
||||
device_list_updates: DeviceListUpdates,
|
||||
}
|
||||
|
||||
/// Compute changes to the room's state and timeline.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_state_and_timeline(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> Result<StateAndTimeline> {
|
||||
let (shortstatehashes, timeline) = try_join(
|
||||
fetch_shortstatehashes(services, sync_context, room_id),
|
||||
build_timeline(services, sync_context, room_id),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let (state_events, notification_counts, joined_since_last_sync) = try_join3(
|
||||
build_state_events(services, sync_context, room_id, shortstatehashes, &timeline),
|
||||
build_notification_counts(services, sync_context, room_id, &timeline),
|
||||
check_joined_since_last_sync(services, shortstatehashes, sync_context),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// the timeline should always include at least one PDU if the syncing user
|
||||
// joined since the last sync, that being the syncing user's join event. if
|
||||
// it's empty something is wrong.
|
||||
if joined_since_last_sync && timeline.pdus.is_empty() {
|
||||
warn!("timeline for newly joined room is empty");
|
||||
}
|
||||
|
||||
let (summary, device_list_updates) = try_join(
|
||||
build_room_summary(
|
||||
services,
|
||||
sync_context,
|
||||
room_id,
|
||||
shortstatehashes,
|
||||
&timeline,
|
||||
&state_events,
|
||||
joined_since_last_sync,
|
||||
),
|
||||
build_device_list_updates(
|
||||
services,
|
||||
sync_context,
|
||||
room_id,
|
||||
shortstatehashes,
|
||||
&state_events,
|
||||
joined_since_last_sync,
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// the token which may be passed to the messages endpoint to backfill room
|
||||
// history
|
||||
let prev_batch = timeline.pdus.front().map(at!(0));
|
||||
|
||||
// note: we always indicate a limited timeline if the syncing user just joined
|
||||
// the room, to indicate to the client that it should request backfill (and to
|
||||
// copy Synapse's behavior). for federated room joins, the `timeline` will
|
||||
// usually only include the syncing user's join event.
|
||||
let limited = timeline.limited || joined_since_last_sync;
|
||||
|
||||
// filter out ignored events from the timeline and convert the PDUs into Ruma's
|
||||
// AnySyncTimelineEvent type
|
||||
let filtered_timeline = timeline
|
||||
.pdus
|
||||
.into_iter()
|
||||
.stream()
|
||||
.wide_filter_map(|item| ignored_filter(services, item, sync_context.syncing_user))
|
||||
.map(at!(1))
|
||||
.map(Event::into_format)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
Ok(StateAndTimeline {
|
||||
state_events,
|
||||
timeline: Timeline {
|
||||
limited,
|
||||
prev_batch: prev_batch.as_ref().map(ToString::to_string),
|
||||
events: filtered_timeline,
|
||||
},
|
||||
summary,
|
||||
notification_counts,
|
||||
device_list_updates,
|
||||
})
|
||||
}
|
||||
|
||||
/// Shortstatehashes necessary to compute what state events to sync.
|
||||
#[derive(Clone, Copy)]
|
||||
struct ShortStateHashes {
|
||||
/// The current state of the syncing room.
|
||||
current_shortstatehash: ShortStateHash,
|
||||
/// The state of the syncing room at the end of the last sync.
|
||||
last_sync_end_shortstatehash: Option<ShortStateHash>,
|
||||
}
|
||||
|
||||
/// Fetch the current_shortstatehash and last_sync_end_shortstatehash.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn fetch_shortstatehashes(
|
||||
services: &Services,
|
||||
SyncContext { last_sync_end_count, current_count, .. }: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> Result<ShortStateHashes> {
|
||||
// the room state currently.
|
||||
// TODO: this should be the room state as of `current_count`, but there's no way
|
||||
// to get that right now.
|
||||
let current_shortstatehash = services
|
||||
.rooms
|
||||
.state
|
||||
.get_room_shortstatehash(room_id)
|
||||
.map_err(|_| err!(Database(error!("Room {room_id} has no state"))));
|
||||
|
||||
// the room state as of the end of the last sync.
|
||||
// this will be None if we are doing an initial sync or if we just joined this
|
||||
// room.
|
||||
let last_sync_end_shortstatehash =
|
||||
OptionFuture::from(last_sync_end_count.map(|last_sync_end_count| {
|
||||
// look up the shortstatehash saved by the last sync's call to
|
||||
// `associate_token_shortstatehash`
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.get_token_shortstatehash(room_id, last_sync_end_count)
|
||||
.inspect_err(move |_| {
|
||||
debug_warn!(
|
||||
token = last_sync_end_count,
|
||||
"Room has no shortstatehash for this token"
|
||||
);
|
||||
})
|
||||
.ok()
|
||||
}))
|
||||
.map(Option::flatten)
|
||||
.map(Ok);
|
||||
|
||||
let (current_shortstatehash, last_sync_end_shortstatehash) =
|
||||
try_join(current_shortstatehash, last_sync_end_shortstatehash).await?;
|
||||
|
||||
/*
|
||||
associate the `current_count` with the `current_shortstatehash`, so we can
|
||||
use it on the next sync as the `last_sync_end_shortstatehash`.
|
||||
|
||||
TODO: the table written to by this call grows extremely fast, gaining one new entry for each
|
||||
joined room on _every single sync request_. we need to find a better way to remember the shortstatehash
|
||||
between syncs.
|
||||
*/
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.associate_token_shortstatehash(room_id, current_count, current_shortstatehash)
|
||||
.await;
|
||||
|
||||
Ok(ShortStateHashes {
|
||||
current_shortstatehash,
|
||||
last_sync_end_shortstatehash,
|
||||
})
|
||||
}
|
||||
|
||||
/// Fetch recent timeline events.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_timeline(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> Result<TimelinePdus> {
|
||||
let SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
current_count,
|
||||
filter,
|
||||
..
|
||||
} = sync_context;
|
||||
|
||||
/*
|
||||
determine the maximum number of events to return in this sync.
|
||||
if the sync filter specifies a limit, that will be used, otherwise
|
||||
`DEFAULT_TIMELINE_LIMIT` will be used. `DEFAULT_TIMELINE_LIMIT` will also be
|
||||
used if the limit is somehow greater than usize::MAX.
|
||||
*/
|
||||
let timeline_limit = filter
|
||||
.room
|
||||
.timeline
|
||||
.limit
|
||||
.and_then(|limit| limit.try_into().ok())
|
||||
.unwrap_or(DEFAULT_TIMELINE_LIMIT);
|
||||
|
||||
load_timeline(
|
||||
services,
|
||||
syncing_user,
|
||||
room_id,
|
||||
last_sync_end_count.map(PduCount::Normal),
|
||||
Some(PduCount::Normal(current_count)),
|
||||
timeline_limit,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Calculate the state events to sync.
|
||||
async fn build_state_events(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
shortstatehashes: ShortStateHashes,
|
||||
timeline: &TimelinePdus,
|
||||
) -> Result<Vec<PduEvent>> {
|
||||
let SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
full_state,
|
||||
..
|
||||
} = sync_context;
|
||||
|
||||
let ShortStateHashes {
|
||||
current_shortstatehash,
|
||||
last_sync_end_shortstatehash,
|
||||
} = shortstatehashes;
|
||||
|
||||
// the spec states that the `state` property only includes state events up to
|
||||
// the beginning of the timeline, so we determine the state of the syncing room
|
||||
// as of the first timeline event. NOTE: this explanation is not entirely
|
||||
// accurate; see the implementation of `build_state_incremental`.
|
||||
let timeline_start_shortstatehash = async {
|
||||
if let Some((_, pdu)) = timeline.pdus.front() {
|
||||
if let Ok(shortstatehash) = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.pdu_shortstatehash(&pdu.event_id)
|
||||
.await
|
||||
{
|
||||
return shortstatehash;
|
||||
}
|
||||
}
|
||||
|
||||
current_shortstatehash
|
||||
};
|
||||
|
||||
// the user IDs of members whose membership needs to be sent to the client, if
|
||||
// lazy-loading is enabled.
|
||||
let lazily_loaded_members =
|
||||
prepare_lazily_loaded_members(services, sync_context, room_id, timeline.senders());
|
||||
|
||||
let (timeline_start_shortstatehash, lazily_loaded_members) =
|
||||
join(timeline_start_shortstatehash, lazily_loaded_members).await;
|
||||
|
||||
// compute the state delta between the previous sync and this sync.
|
||||
match (last_sync_end_count, last_sync_end_shortstatehash) {
|
||||
/*
|
||||
if `last_sync_end_count` is Some (meaning this is an incremental sync), and `last_sync_end_shortstatehash`
|
||||
is Some (meaning the syncing user didn't just join this room for the first time ever), and `full_state` is false,
|
||||
then use `build_state_incremental`.
|
||||
*/
|
||||
| (Some(last_sync_end_count), Some(last_sync_end_shortstatehash)) if !full_state =>
|
||||
build_state_incremental(
|
||||
services,
|
||||
syncing_user,
|
||||
room_id,
|
||||
PduCount::Normal(last_sync_end_count),
|
||||
last_sync_end_shortstatehash,
|
||||
timeline_start_shortstatehash,
|
||||
current_shortstatehash,
|
||||
timeline,
|
||||
lazily_loaded_members.as_ref(),
|
||||
)
|
||||
.boxed()
|
||||
.await,
|
||||
/*
|
||||
otherwise use `build_state_initial`. note that this branch will be taken if the user joined this room since the last sync
|
||||
for the first time ever, because in that case we have no `last_sync_end_shortstatehash` and can't correctly calculate
|
||||
the state using the incremental sync algorithm.
|
||||
*/
|
||||
| _ =>
|
||||
build_state_initial(
|
||||
services,
|
||||
syncing_user,
|
||||
timeline_start_shortstatehash,
|
||||
lazily_loaded_members.as_ref(),
|
||||
)
|
||||
.boxed()
|
||||
.await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the number of unread notifications in this room.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_notification_counts(
|
||||
services: &Services,
|
||||
SyncContext { syncing_user, last_sync_end_count, .. }: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
timeline: &TimelinePdus,
|
||||
) -> Result<Option<UnreadNotificationsCount>> {
|
||||
// determine whether to actually update the notification counts
|
||||
let should_send_notification_counts = async {
|
||||
// if we're going to sync some timeline events, the notification count has
|
||||
// definitely changed to include them
|
||||
if !timeline.pdus.is_empty() {
|
||||
return true;
|
||||
}
|
||||
|
||||
// if this is an initial sync, we need to send notification counts because the
|
||||
// client doesn't know what they are yet
|
||||
let Some(last_sync_end_count) = last_sync_end_count else {
|
||||
return true;
|
||||
};
|
||||
|
||||
let last_notification_read = services
|
||||
.rooms
|
||||
.user
|
||||
.last_notification_read(syncing_user, room_id)
|
||||
.await;
|
||||
|
||||
// if the syncing user has read the events we sent during the last sync, we need
|
||||
// to send a new notification count on this sync.
|
||||
if last_notification_read > last_sync_end_count {
|
||||
return true;
|
||||
}
|
||||
|
||||
// otherwise, nothing's changed.
|
||||
false
|
||||
};
|
||||
|
||||
if should_send_notification_counts.await {
|
||||
let (notification_count, highlight_count) = join(
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.notification_count(syncing_user, room_id)
|
||||
.map(TryInto::try_into)
|
||||
.unwrap_or(uint!(0)),
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.highlight_count(syncing_user, room_id)
|
||||
.map(TryInto::try_into)
|
||||
.unwrap_or(uint!(0)),
|
||||
)
|
||||
.await;
|
||||
|
||||
trace!(?notification_count, ?highlight_count, "syncing new notification counts");
|
||||
|
||||
Ok(Some(UnreadNotificationsCount {
|
||||
notification_count: Some(notification_count),
|
||||
highlight_count: Some(highlight_count),
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the syncing user joined the room since their last incremental sync.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn check_joined_since_last_sync(
|
||||
services: &Services,
|
||||
ShortStateHashes { last_sync_end_shortstatehash, .. }: ShortStateHashes,
|
||||
SyncContext { syncing_user, .. }: SyncContext<'_>,
|
||||
) -> Result<bool> {
|
||||
// fetch the syncing user's membership event during the last sync.
|
||||
// this will be None if `previous_sync_end_shortstatehash` is None.
|
||||
let membership_during_previous_sync = match last_sync_end_shortstatehash {
|
||||
| Some(last_sync_end_shortstatehash) => services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get_content(
|
||||
last_sync_end_shortstatehash,
|
||||
&StateEventType::RoomMember,
|
||||
syncing_user.as_str(),
|
||||
)
|
||||
.await
|
||||
.inspect_err(|_| debug_warn!("User has no previous membership"))
|
||||
.ok(),
|
||||
| None => None,
|
||||
};
|
||||
|
||||
// TODO: If the requesting user got state-reset out of the room, this
|
||||
// will be `true` when it shouldn't be. this function should never be called
|
||||
// in that situation, but it may be if the membership cache didn't get updated.
|
||||
// the root cause of this needs to be addressed
|
||||
let joined_since_last_sync =
|
||||
membership_during_previous_sync.is_none_or(|content: RoomMemberEventContent| {
|
||||
content.membership != MembershipState::Join
|
||||
});
|
||||
|
||||
if joined_since_last_sync {
|
||||
trace!("user joined since last sync");
|
||||
}
|
||||
|
||||
Ok(joined_since_last_sync)
|
||||
}
|
||||
|
||||
/// Build the `summary` field of the room object, which includes
|
||||
/// the number of joined and invited users and the room's heroes.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_room_summary(
|
||||
services: &Services,
|
||||
SyncContext { syncing_user, .. }: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
ShortStateHashes { current_shortstatehash, .. }: ShortStateHashes,
|
||||
timeline: &TimelinePdus,
|
||||
state_events: &[PduEvent],
|
||||
joined_since_last_sync: bool,
|
||||
) -> Result<Option<RoomSummary>> {
|
||||
// determine whether any events in the state or timeline are membership events.
|
||||
let are_syncing_membership_events = timeline
|
||||
.pdus
|
||||
.iter()
|
||||
.map(|(_, pdu)| pdu)
|
||||
.chain(state_events.iter())
|
||||
.any(|event| event.kind == RoomMember);
|
||||
|
||||
/*
|
||||
we only need to send an updated room summary if:
|
||||
1. there are membership events in the state or timeline, because they might have changed the
|
||||
membership counts or heroes, or
|
||||
2. the syncing user just joined this room, which usually implies #1 because their join event should be in the timeline.
|
||||
*/
|
||||
if !(are_syncing_membership_events || joined_since_last_sync) {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let joined_member_count = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(room_id)
|
||||
.unwrap_or(0);
|
||||
|
||||
let invited_member_count = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_invited_count(room_id)
|
||||
.unwrap_or(0);
|
||||
|
||||
let has_name = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_contains_type(current_shortstatehash, &StateEventType::RoomName);
|
||||
|
||||
let has_canonical_alias = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_contains_type(current_shortstatehash, &StateEventType::RoomCanonicalAlias);
|
||||
|
||||
let (joined_member_count, invited_member_count, has_name, has_canonical_alias) =
|
||||
join4(joined_member_count, invited_member_count, has_name, has_canonical_alias).await;
|
||||
|
||||
// only send heroes if the room has neither a name nor a canonical alias
|
||||
let heroes = if !(has_name || has_canonical_alias) {
|
||||
Some(build_heroes(services, room_id, syncing_user, current_shortstatehash).await)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
trace!(
|
||||
?joined_member_count,
|
||||
?invited_member_count,
|
||||
heroes_length = heroes.as_ref().map(HashSet::len),
|
||||
"syncing updated summary"
|
||||
);
|
||||
|
||||
Ok(Some(RoomSummary {
|
||||
heroes: heroes
|
||||
.map(|heroes| heroes.into_iter().collect())
|
||||
.unwrap_or_default(),
|
||||
joined_member_count: Some(ruma_from_u64(joined_member_count)),
|
||||
invited_member_count: Some(ruma_from_u64(invited_member_count)),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Fetch the user IDs to include in the `m.heroes` property of the room
|
||||
/// summary.
|
||||
async fn build_heroes(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
syncing_user: &UserId,
|
||||
current_shortstatehash: ShortStateHash,
|
||||
) -> HashSet<OwnedUserId> {
|
||||
const MAX_HERO_COUNT: usize = 5;
|
||||
|
||||
// fetch joined members from the state cache first
|
||||
let joined_members_stream = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(room_id)
|
||||
.map(ToOwned::to_owned);
|
||||
|
||||
// then fetch invited members
|
||||
let invited_members_stream = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members_invited(room_id)
|
||||
.map(ToOwned::to_owned);
|
||||
|
||||
// then as a last resort fetch every membership event
|
||||
let all_members_stream = services
|
||||
.rooms
|
||||
.short
|
||||
.multi_get_statekey_from_short(
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_full_shortids(current_shortstatehash)
|
||||
.ignore_err()
|
||||
.ready_filter_map(|(key, _)| Some(key)),
|
||||
)
|
||||
.ignore_err()
|
||||
.ready_filter_map(|(event_type, state_key)| {
|
||||
if event_type == StateEventType::RoomMember {
|
||||
state_key.to_string().try_into().ok()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
joined_members_stream
|
||||
.chain(invited_members_stream)
|
||||
.chain(all_members_stream)
|
||||
// the hero list should never include the syncing user
|
||||
.ready_filter(|user_id| user_id != syncing_user)
|
||||
.take(MAX_HERO_COUNT)
|
||||
.collect()
|
||||
.await
|
||||
}
|
||||
|
||||
/// Collect updates to users' device lists for E2EE.
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
async fn build_device_list_updates(
|
||||
services: &Services,
|
||||
SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
current_count,
|
||||
..
|
||||
}: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
ShortStateHashes { current_shortstatehash, .. }: ShortStateHashes,
|
||||
state_events: &Vec<PduEvent>,
|
||||
joined_since_last_sync: bool,
|
||||
) -> Result<DeviceListUpdates> {
|
||||
let is_encrypted_room = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")
|
||||
.is_ok();
|
||||
|
||||
// initial syncs don't include device updates, and rooms which aren't encrypted
|
||||
// don't affect them, so return early in either of those cases
|
||||
if last_sync_end_count.is_none() || !(is_encrypted_room.await) {
|
||||
return Ok(DeviceListUpdates::new());
|
||||
}
|
||||
|
||||
let mut device_list_updates = DeviceListUpdates::new();
|
||||
|
||||
// add users with changed keys to the `changed` list
|
||||
services
|
||||
.users
|
||||
.room_keys_changed(room_id, last_sync_end_count, Some(current_count))
|
||||
.map(at!(0))
|
||||
.map(ToOwned::to_owned)
|
||||
.ready_for_each(|user_id| {
|
||||
device_list_updates.changed.insert(user_id);
|
||||
})
|
||||
.await;
|
||||
|
||||
// add users who now share encrypted rooms to `changed` and
|
||||
// users who no longer share encrypted rooms to `left`
|
||||
for state_event in state_events {
|
||||
if state_event.kind == RoomMember {
|
||||
let Some(content): Option<RoomMemberEventContent> = state_event.get_content().ok()
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let Some(user_id): Option<OwnedUserId> = state_event
|
||||
.state_key
|
||||
.as_ref()
|
||||
.and_then(|key| key.parse().ok())
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
{
|
||||
use MembershipState::*;
|
||||
|
||||
if matches!(content.membership, Leave | Join) {
|
||||
let shares_encrypted_room =
|
||||
share_encrypted_room(services, syncing_user, &user_id, Some(room_id))
|
||||
.await;
|
||||
match content.membership {
|
||||
| Leave if !shares_encrypted_room => {
|
||||
device_list_updates.left.insert(user_id);
|
||||
},
|
||||
| Join if joined_since_last_sync || shares_encrypted_room => {
|
||||
device_list_updates.changed.insert(user_id);
|
||||
},
|
||||
| _ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !device_list_updates.is_empty() {
|
||||
trace!(
|
||||
changed = device_list_updates.changed.len(),
|
||||
left = device_list_updates.left.len(),
|
||||
"syncing device list updates"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(device_list_updates)
|
||||
}
|
||||
349
src/api/client/sync/v3/left.rs
Normal file
349
src/api/client/sync/v3/left.rs
Normal file
@@ -0,0 +1,349 @@
|
||||
use conduwuit::{
|
||||
Event, PduCount, PduEvent, Result, at, debug_warn,
|
||||
pdu::EventHash,
|
||||
trace,
|
||||
utils::{self, IterStream, future::ReadyEqExt, stream::WidebandExt as _},
|
||||
};
|
||||
use futures::{StreamExt, future::join};
|
||||
use ruma::{
|
||||
EventId, OwnedRoomId, RoomId,
|
||||
api::client::sync::sync_events::v3::{LeftRoom, RoomAccountData, State, Timeline},
|
||||
events::{StateEventType, TimelineEventType},
|
||||
uint,
|
||||
};
|
||||
use serde_json::value::RawValue;
|
||||
use service::{Services, rooms::short::ShortStateHash};
|
||||
|
||||
use crate::client::{
|
||||
TimelinePdus, ignored_filter,
|
||||
sync::{
|
||||
load_timeline,
|
||||
v3::{
|
||||
DEFAULT_TIMELINE_LIMIT, SyncContext, prepare_lazily_loaded_members,
|
||||
state::build_state_initial,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#[tracing::instrument(
|
||||
name = "left",
|
||||
level = "debug",
|
||||
skip_all,
|
||||
fields(
|
||||
room_id = %room_id,
|
||||
),
|
||||
)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(super) async fn load_left_room(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
ref room_id: OwnedRoomId,
|
||||
leave_membership_event: Option<PduEvent>,
|
||||
) -> Result<Option<LeftRoom>> {
|
||||
let SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
current_count,
|
||||
filter,
|
||||
..
|
||||
} = sync_context;
|
||||
|
||||
// the global count as of the moment the user left the room
|
||||
let Some(left_count) = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.get_left_count(room_id, syncing_user)
|
||||
.await
|
||||
.ok()
|
||||
else {
|
||||
// if we get here, the membership cache is incorrect, likely due to a state
|
||||
// reset
|
||||
debug_warn!("attempting to sync left room but no left count exists");
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// return early if we haven't gotten to this leave yet.
|
||||
// this can happen if the user leaves while a sync response is being generated
|
||||
if current_count < left_count {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// return early if this is an incremental sync, and we've already synced this
|
||||
// leave to the user, and `include_leave` isn't set on the filter.
|
||||
if !filter.room.include_leave && last_sync_end_count >= Some(left_count) {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if let Some(ref leave_membership_event) = leave_membership_event {
|
||||
debug_assert_eq!(
|
||||
leave_membership_event.kind,
|
||||
TimelineEventType::RoomMember,
|
||||
"leave PDU should be m.room.member"
|
||||
);
|
||||
}
|
||||
|
||||
let does_not_exist = services.rooms.metadata.exists(room_id).eq(&false).await;
|
||||
|
||||
let (timeline, state_events) = match leave_membership_event {
|
||||
| Some(leave_membership_event) if does_not_exist => {
|
||||
/*
|
||||
we have none PDUs with left beef for this room, likely because it was a rejected invite to a room
|
||||
which nobody on this homeserver is in. `leave_pdu` is the remote-assisted outlier leave event for the room,
|
||||
which is all we can send to the client.
|
||||
|
||||
if this is an initial sync, don't include this room at all to keep the client from asking for
|
||||
state that we don't have.
|
||||
*/
|
||||
|
||||
if last_sync_end_count.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
trace!("syncing remote-assisted leave PDU");
|
||||
(TimelinePdus::default(), vec![leave_membership_event])
|
||||
},
|
||||
| Some(leave_membership_event) => {
|
||||
// we have this room in our DB, and can fetch the state and timeline from when
|
||||
// the user left.
|
||||
|
||||
let leave_state_key = syncing_user;
|
||||
debug_assert_eq!(
|
||||
Some(leave_state_key.as_str()),
|
||||
leave_membership_event.state_key(),
|
||||
"leave PDU should be for the user requesting the sync"
|
||||
);
|
||||
|
||||
// the shortstatehash of the state _immediately before_ the syncing user left
|
||||
// this room. the state represented here _does not_ include
|
||||
// `leave_membership_event`.
|
||||
let leave_shortstatehash = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.pdu_shortstatehash(&leave_membership_event.event_id)
|
||||
.await?;
|
||||
|
||||
let prev_membership_event = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get(
|
||||
leave_shortstatehash,
|
||||
&StateEventType::RoomMember,
|
||||
leave_state_key.as_str(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
build_left_state_and_timeline(
|
||||
services,
|
||||
sync_context,
|
||||
room_id,
|
||||
leave_membership_event,
|
||||
leave_shortstatehash,
|
||||
prev_membership_event,
|
||||
)
|
||||
.await?
|
||||
},
|
||||
| None => {
|
||||
/*
|
||||
no leave event was actually sent in this room, but we still need to pretend
|
||||
like the user left it. this is usually because the room was banned by a server admin.
|
||||
|
||||
if this is an incremental sync, generate a fake leave event to make the room vanish from clients.
|
||||
otherwise we don't tell the client about this room at all.
|
||||
*/
|
||||
if last_sync_end_count.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
trace!("syncing dummy leave event");
|
||||
(TimelinePdus::default(), vec![create_dummy_leave_event(
|
||||
services,
|
||||
sync_context,
|
||||
room_id,
|
||||
)])
|
||||
},
|
||||
};
|
||||
|
||||
let raw_timeline_pdus = timeline
|
||||
.pdus
|
||||
.into_iter()
|
||||
.stream()
|
||||
// filter out ignored events from the timeline
|
||||
.wide_filter_map(|item| ignored_filter(services, item, syncing_user))
|
||||
.map(at!(1))
|
||||
.map(Event::into_format)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
Ok(Some(LeftRoom {
|
||||
account_data: RoomAccountData { events: Vec::new() },
|
||||
timeline: Timeline {
|
||||
limited: timeline.limited,
|
||||
prev_batch: Some(current_count.to_string()),
|
||||
events: raw_timeline_pdus,
|
||||
},
|
||||
state: State {
|
||||
events: state_events.into_iter().map(Event::into_format).collect(),
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
async fn build_left_state_and_timeline(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
leave_membership_event: PduEvent,
|
||||
leave_shortstatehash: ShortStateHash,
|
||||
prev_membership_event: PduEvent,
|
||||
) -> Result<(TimelinePdus, Vec<PduEvent>)> {
|
||||
let SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
filter,
|
||||
..
|
||||
} = sync_context;
|
||||
|
||||
let timeline_start_count = if let Some(last_sync_end_count) = last_sync_end_count {
|
||||
// for incremental syncs, start the timeline after `since`
|
||||
PduCount::Normal(last_sync_end_count)
|
||||
} else {
|
||||
// for initial syncs, start the timeline after the previous membership
|
||||
// event. we don't want to include the membership event itself
|
||||
// because clients get confused when they see a `join`
|
||||
// membership event in a `leave` room.
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu_count(&prev_membership_event.event_id)
|
||||
.await?
|
||||
};
|
||||
|
||||
// end the timeline at the user's leave event
|
||||
let timeline_end_count = services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu_count(leave_membership_event.event_id())
|
||||
.await?;
|
||||
|
||||
// limit the timeline using the same logic as for joined rooms
|
||||
let timeline_limit = filter
|
||||
.room
|
||||
.timeline
|
||||
.limit
|
||||
.and_then(|limit| limit.try_into().ok())
|
||||
.unwrap_or(DEFAULT_TIMELINE_LIMIT);
|
||||
|
||||
let timeline = load_timeline(
|
||||
services,
|
||||
syncing_user,
|
||||
room_id,
|
||||
Some(timeline_start_count),
|
||||
Some(timeline_end_count),
|
||||
timeline_limit,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let timeline_start_shortstatehash = async {
|
||||
if let Some((_, pdu)) = timeline.pdus.front() {
|
||||
if let Ok(shortstatehash) = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.pdu_shortstatehash(&pdu.event_id)
|
||||
.await
|
||||
{
|
||||
return shortstatehash;
|
||||
}
|
||||
}
|
||||
|
||||
// the timeline generally should not be empty (see the TODO further down),
|
||||
// but in case it is we use `leave_shortstatehash` as the state to
|
||||
// send
|
||||
leave_shortstatehash
|
||||
};
|
||||
|
||||
let lazily_loaded_members =
|
||||
prepare_lazily_loaded_members(services, sync_context, room_id, timeline.senders());
|
||||
|
||||
let (timeline_start_shortstatehash, lazily_loaded_members) =
|
||||
join(timeline_start_shortstatehash, lazily_loaded_members).await;
|
||||
|
||||
// TODO: calculate incremental state for incremental syncs.
|
||||
// always calculating initial state _works_ but returns more data and does
|
||||
// more processing than strictly necessary.
|
||||
let mut state = build_state_initial(
|
||||
services,
|
||||
syncing_user,
|
||||
timeline_start_shortstatehash,
|
||||
lazily_loaded_members.as_ref(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
/*
|
||||
remove membership events for the syncing user from state.
|
||||
usually, `state` should include a `join` membership event and `timeline` should include a `leave` one.
|
||||
however, the matrix-js-sdk gets confused when this happens (see [1]) and doesn't process the room leave,
|
||||
so we have to filter out the membership from `state`.
|
||||
|
||||
NOTE: we are sending more information than synapse does in this scenario, because we always
|
||||
calculate `state` for initial syncs, even when the sync being performed is incremental.
|
||||
however, the specification does not forbid sending extraneous events in `state`.
|
||||
|
||||
TODO: there is an additional bug at play here. sometimes `load_joined_room` syncs the `leave` event
|
||||
before `load_left_room` does, which means the `timeline` we sync immediately after a leave is empty.
|
||||
this shouldn't happen -- `timeline` should always include the `leave` event. this is probably
|
||||
a race condition with the membership state cache.
|
||||
|
||||
[1]: https://github.com/matrix-org/matrix-js-sdk/issues/5071
|
||||
*/
|
||||
|
||||
// `state` should only ever include one membership event for the syncing user
|
||||
let membership_event_index = state.iter().position(|pdu| {
|
||||
*pdu.event_type() == TimelineEventType::RoomMember
|
||||
&& pdu.state_key() == Some(syncing_user.as_str())
|
||||
});
|
||||
|
||||
if let Some(index) = membership_event_index {
|
||||
// the ordering of events in `state` does not matter
|
||||
state.swap_remove(index);
|
||||
}
|
||||
|
||||
trace!(
|
||||
?timeline_start_count,
|
||||
?timeline_end_count,
|
||||
"syncing {} timeline events (limited = {}) and {} state events",
|
||||
timeline.pdus.len(),
|
||||
timeline.limited,
|
||||
state.len()
|
||||
);
|
||||
|
||||
Ok((timeline, state))
|
||||
}
|
||||
|
||||
fn create_dummy_leave_event(
|
||||
services: &Services,
|
||||
SyncContext { syncing_user, .. }: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
) -> PduEvent {
|
||||
// TODO: because this event ID is random, it could cause caching issues with
|
||||
// clients. perhaps a database table could be created to hold these dummy
|
||||
// events, or they could be stored as outliers?
|
||||
PduEvent {
|
||||
event_id: EventId::new(services.globals.server_name()),
|
||||
sender: syncing_user.to_owned(),
|
||||
origin: None,
|
||||
origin_server_ts: utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
kind: TimelineEventType::RoomMember,
|
||||
content: RawValue::from_string(r#"{"membership": "leave"}"#.to_owned()).unwrap(),
|
||||
state_key: Some(syncing_user.as_str().into()),
|
||||
unsigned: None,
|
||||
// The following keys are dropped on conversion
|
||||
room_id: Some(room_id.to_owned()),
|
||||
prev_events: vec![],
|
||||
depth: uint!(1),
|
||||
auth_events: vec![],
|
||||
redacts: None,
|
||||
hashes: EventHash { sha256: String::new() },
|
||||
signatures: None,
|
||||
}
|
||||
}
|
||||
502
src/api/client/sync/v3/mod.rs
Normal file
502
src/api/client/sync/v3/mod.rs
Normal file
@@ -0,0 +1,502 @@
|
||||
mod joined;
|
||||
mod left;
|
||||
mod state;
|
||||
|
||||
use std::{
|
||||
cmp::{self},
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Result, extract_variant,
|
||||
utils::{
|
||||
ReadyExt, TryFutureExtExt,
|
||||
stream::{BroadbandExt, Tools, WidebandExt},
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{
|
||||
FutureExt, StreamExt, TryFutureExt,
|
||||
future::{OptionFuture, join3, join4, join5},
|
||||
};
|
||||
use ruma::{
|
||||
DeviceId, OwnedUserId, RoomId, UserId,
|
||||
api::client::{
|
||||
filter::FilterDefinition,
|
||||
sync::sync_events::{
|
||||
self, DeviceLists,
|
||||
v3::{
|
||||
Filter, GlobalAccountData, InviteState, InvitedRoom, KnockState, KnockedRoom,
|
||||
Presence, Rooms, ToDevice,
|
||||
},
|
||||
},
|
||||
uiaa::UiaaResponse,
|
||||
},
|
||||
events::{
|
||||
AnyRawAccountDataEvent,
|
||||
presence::{PresenceEvent, PresenceEventContent},
|
||||
},
|
||||
serde::Raw,
|
||||
};
|
||||
use service::rooms::lazy_loading::{self, MemberSet, Options as _};
|
||||
|
||||
use super::{load_timeline, share_encrypted_room};
|
||||
use crate::{
|
||||
Ruma, RumaResponse,
|
||||
client::{
|
||||
is_ignored_invite,
|
||||
sync::v3::{joined::load_joined_room, left::load_left_room},
|
||||
},
|
||||
};
|
||||
|
||||
/// The default maximum number of events to return in the `timeline` key of
|
||||
/// joined and left rooms. If the number of events sent since the last sync
|
||||
/// exceeds this number, the `timeline` will be `limited`.
|
||||
const DEFAULT_TIMELINE_LIMIT: usize = 30;
|
||||
|
||||
/// A collection of updates to users' device lists, used for E2EE.
|
||||
struct DeviceListUpdates {
|
||||
changed: HashSet<OwnedUserId>,
|
||||
left: HashSet<OwnedUserId>,
|
||||
}
|
||||
|
||||
impl DeviceListUpdates {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
changed: HashSet::new(),
|
||||
left: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn merge(&mut self, other: Self) {
|
||||
self.changed.extend(other.changed);
|
||||
self.left.extend(other.left);
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool { self.changed.is_empty() && self.left.is_empty() }
|
||||
}
|
||||
|
||||
impl From<DeviceListUpdates> for DeviceLists {
|
||||
fn from(val: DeviceListUpdates) -> Self {
|
||||
Self {
|
||||
changed: val.changed.into_iter().collect(),
|
||||
left: val.left.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// References to common data needed to calculate the sync response.
|
||||
#[derive(Clone, Copy)]
|
||||
struct SyncContext<'a> {
|
||||
/// The ID of the user requesting this sync.
|
||||
syncing_user: &'a UserId,
|
||||
/// The ID of the device requesting this sync, which will belong to
|
||||
/// `syncing_user`.
|
||||
syncing_device: &'a DeviceId,
|
||||
/// The global count at the end of the previous sync response.
|
||||
/// The previous sync's `current_count` will become the next sync's
|
||||
/// `last_sync_end_count`. This will be None if no `since` query parameter
|
||||
/// was specified, indicating an initial sync.
|
||||
last_sync_end_count: Option<u64>,
|
||||
/// The global count as of when we started building the sync response.
|
||||
/// This is used as an upper bound when querying the database to ensure the
|
||||
/// response represents a snapshot in time and doesn't include data which
|
||||
/// appeared while the response was being built.
|
||||
current_count: u64,
|
||||
/// The `full_state` query parameter, used when syncing state for joined and
|
||||
/// left rooms.
|
||||
full_state: bool,
|
||||
/// The sync filter, which the client uses to specify what data should be
|
||||
/// included in the sync response.
|
||||
filter: &'a FilterDefinition,
|
||||
}
|
||||
|
||||
impl<'a> SyncContext<'a> {
|
||||
fn lazy_loading_context(&self, room_id: &'a RoomId) -> lazy_loading::Context<'a> {
|
||||
lazy_loading::Context {
|
||||
user_id: self.syncing_user,
|
||||
device_id: Some(self.syncing_device),
|
||||
room_id,
|
||||
token: self.last_sync_end_count,
|
||||
options: Some(&self.filter.room.state.lazy_load_options),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn lazy_loading_enabled(&self) -> bool {
|
||||
(self.filter.room.state.lazy_load_options.is_enabled()
|
||||
|| self.filter.room.timeline.lazy_load_options.is_enabled())
|
||||
&& !self.full_state
|
||||
}
|
||||
}
|
||||
|
||||
type PresenceUpdates = HashMap<OwnedUserId, PresenceEventContent>;
|
||||
|
||||
/// # `GET /_matrix/client/r0/sync`
|
||||
///
|
||||
/// Synchronize the client's state with the latest state on the server.
|
||||
///
|
||||
/// - This endpoint takes a `since` parameter which should be the `next_batch`
|
||||
/// value from a previous request for incremental syncs.
|
||||
///
|
||||
/// Calling this endpoint without a `since` parameter returns:
|
||||
/// - Some of the most recent events of each timeline
|
||||
/// - Notification counts for each room
|
||||
/// - Joined and invited member counts, heroes
|
||||
/// - All state events
|
||||
///
|
||||
/// Calling this endpoint with a `since` parameter from a previous `next_batch`
|
||||
/// returns: For joined rooms:
|
||||
/// - Some of the most recent events of each timeline that happened after since
|
||||
/// - If user joined the room after since: All state events (unless lazy loading
|
||||
/// is activated) and all device list updates in that room
|
||||
/// - If the user was already in the room: A list of all events that are in the
|
||||
/// state now, but were not in the state at `since`
|
||||
/// - If the state we send contains a member event: Joined and invited member
|
||||
/// counts, heroes
|
||||
/// - Device list updates that happened after `since`
|
||||
/// - If there are events in the timeline we send or the user send updated his
|
||||
/// read mark: Notification counts
|
||||
/// - EDUs that are active now (read receipts, typing updates, presence)
|
||||
/// - TODO: Allow multiple sync streams to support Pantalaimon
|
||||
///
|
||||
/// For invited rooms:
|
||||
/// - If the user was invited after `since`: A subset of the state of the room
|
||||
/// at the point of the invite
|
||||
///
|
||||
/// For left rooms:
|
||||
/// - If the user left after `since`: `prev_batch` token, empty state (TODO:
|
||||
/// subset of the state at the point of the leave)
|
||||
#[tracing::instrument(
|
||||
name = "sync",
|
||||
level = "debug",
|
||||
skip_all,
|
||||
fields(
|
||||
since = %body.body.since.as_deref().unwrap_or_default(),
|
||||
)
|
||||
)]
|
||||
pub(crate) async fn sync_events_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<sync_events::v3::Request>,
|
||||
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
|
||||
let (sender_user, sender_device) = body.sender();
|
||||
|
||||
// Presence update
|
||||
if services.config.allow_local_presence {
|
||||
services
|
||||
.presence
|
||||
.ping_presence(sender_user, &body.body.set_presence)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Increment the "device last active" metadata
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, Some(sender_device), client_ip)
|
||||
.await;
|
||||
|
||||
// Setup watchers, so if there's no response, we can wait for them
|
||||
let watcher = services.sync.watch(sender_user, sender_device);
|
||||
|
||||
let response = build_sync_events(&services, &body).await?;
|
||||
if body.body.full_state
|
||||
|| !(response.rooms.is_empty()
|
||||
&& response.presence.is_empty()
|
||||
&& response.account_data.is_empty()
|
||||
&& response.device_lists.is_empty()
|
||||
&& response.to_device.is_empty())
|
||||
{
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
// Hang a few seconds so requests are not spammed
|
||||
// Stop hanging if new info arrives
|
||||
let default = Duration::from_secs(30);
|
||||
let duration = cmp::min(body.body.timeout.unwrap_or(default), default);
|
||||
_ = tokio::time::timeout(duration, watcher).await;
|
||||
|
||||
// Retry returning data
|
||||
build_sync_events(&services, &body).await
|
||||
}
|
||||
|
||||
/// Assemble a complete `/sync` v3 response for the requesting user/device.
///
/// Gathers joined/left/invited/knocked room data, presence, global account
/// data, device-list changes, one-time-key counts and to-device events since
/// the caller's `since` token. The independent queries are built as lazy
/// futures/streams and driven concurrently via the `join*` combinators at the
/// bottom of the function.
///
/// Returns the Ruma v3 response; individual room failures are logged and
/// skipped rather than failing the whole sync.
pub(crate) async fn build_sync_events(
	services: &Services,
	body: &Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
	let (syncing_user, syncing_device) = body.sender();

	let current_count = services.globals.current_count()?;

	// the `since` token is the last sync end count stringified
	// None => initial sync (unparsable tokens are treated the same way)
	let last_sync_end_count = body
		.body
		.since
		.as_ref()
		.and_then(|string| string.parse().ok());

	let full_state = body.body.full_state;

	// FilterDefinition is very large (0x1000 bytes), let's put it on the heap
	let filter = Box::new(match body.body.filter.as_ref() {
		// use the default filter if none was specified
		| None => FilterDefinition::default(),
		// use inline filters directly
		| Some(Filter::FilterDefinition(filter)) => filter.clone(),
		// look up filter IDs from the database
		| Some(Filter::FilterId(filter_id)) => services
			.users
			.get_filter(syncing_user, filter_id)
			.await
			.unwrap_or_default(),
	});

	// shared per-request context threaded through the per-room loaders
	let context = SyncContext {
		syncing_user,
		syncing_device,
		last_sync_end_count,
		current_count,
		full_state,
		filter: &filter,
	};

	// load every joined room concurrently; fold the per-room device-list
	// updates into one aggregate and drop rooms with nothing to report
	let joined_rooms = services
		.rooms
		.state_cache
		.rooms_joined(syncing_user)
		.map(ToOwned::to_owned)
		.broad_filter_map(|room_id| async {
			let joined_room = load_joined_room(services, context, room_id.clone()).await;

			match joined_room {
				| Ok((room, updates)) => Some((room_id, room, updates)),
				| Err(err) => {
					// a single broken room must not fail the whole sync
					warn!(?err, ?room_id, "error loading joined room {}", room_id);
					None
				},
			}
		})
		.ready_fold(
			(BTreeMap::new(), DeviceListUpdates::new()),
			|(mut joined_rooms, mut all_updates), (room_id, joined_room, updates)| {
				all_updates.merge(updates);

				if !joined_room.is_empty() {
					joined_rooms.insert(room_id, joined_room);
				}

				(joined_rooms, all_updates)
			},
		);

	// rooms the user has left; load errors and empty results are skipped
	let left_rooms = services
		.rooms
		.state_cache
		.rooms_left(syncing_user)
		.broad_filter_map(|(room_id, leave_pdu)| {
			load_left_room(services, context, room_id.clone(), leave_pdu)
				.map_ok(move |left_room| (room_id, left_room))
				.ok()
		})
		.ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room)))
		.collect();

	// pending invites, excluding invites from ignored users
	let invited_rooms = services
		.rooms
		.state_cache
		.rooms_invited(syncing_user)
		.wide_filter_map(async |(room_id, invite_state)| {
			if is_ignored_invite(services, syncing_user, &room_id).await {
				None
			} else {
				Some((room_id, invite_state))
			}
		})
		.fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move {
			let invite_count = services
				.rooms
				.state_cache
				.get_invite_count(&room_id, syncing_user)
				.await
				.ok();

			// only sync this invite if it was sent after the last /sync call
			// (None `last_sync_end_count` compares less than any Some, so
			// initial syncs include all invites)
			if last_sync_end_count < invite_count {
				let invited_room = InvitedRoom {
					invite_state: InviteState { events: invite_state },
				};

				invited_rooms.insert(room_id, invited_room);
			}
			invited_rooms
		});

	// pending knocks, same recency rule as invites
	let knocked_rooms = services
		.rooms
		.state_cache
		.rooms_knocked(syncing_user)
		.fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move {
			let knock_count = services
				.rooms
				.state_cache
				.get_knock_count(&room_id, syncing_user)
				.await
				.ok();

			// only sync this knock if it was sent after the last /sync call
			if last_sync_end_count < knock_count {
				let knocked_room = KnockedRoom {
					knock_state: KnockState { events: knock_state },
				};

				knocked_rooms.insert(room_id, knocked_room);
			}
			knocked_rooms
		});

	// presence is only gathered when local presence is enabled in the config;
	// OptionFuture resolves to None without polling otherwise
	let presence_updates: OptionFuture<_> = services
		.config
		.allow_local_presence
		.then(|| process_presence_updates(services, last_sync_end_count, syncing_user))
		.into();

	// global (non-room) account data changed within the sync window
	let account_data = services
		.account_data
		.changes_since(None, syncing_user, last_sync_end_count, Some(current_count))
		.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
		.collect();

	// Look for device list updates of this account
	let keys_changed = services
		.users
		.keys_changed(syncing_user, last_sync_end_count, Some(current_count))
		.map(ToOwned::to_owned)
		.collect::<HashSet<_>>();

	let to_device_events = services
		.users
		.get_to_device_events(
			syncing_user,
			syncing_device,
			last_sync_end_count,
			Some(current_count),
		)
		.collect::<Vec<_>>();

	let device_one_time_keys_count = services
		.users
		.count_one_time_keys(syncing_user, syncing_device);

	// Remove all to-device events the device received *last time*
	let remove_to_device_events =
		services
			.users
			.remove_to_device_events(syncing_user, syncing_device, last_sync_end_count);

	// drive everything concurrently; the nested join groups keep the tuple
	// arity within the join3/join4/join5 helpers
	let rooms = join4(joined_rooms, left_rooms, invited_rooms, knocked_rooms);
	let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates);
	let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms)
		.boxed()
		.await;

	// destructuring order mirrors the join nesting above
	let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top;
	let ((), to_device_events, presence_updates) = ephemeral;
	let (joined_rooms, left_rooms, invited_rooms, knocked_rooms) = rooms;
	let (joined_rooms, mut device_list_updates) = joined_rooms;
	device_list_updates.changed.extend(keys_changed);

	let response = sync_events::v3::Response {
		account_data: GlobalAccountData { events: account_data },
		device_lists: device_list_updates.into(),
		device_one_time_keys_count,
		// Fallback keys are not yet supported
		device_unused_fallback_key_types: None,
		// the next `since` token is the current count stringified
		next_batch: current_count.to_string(),
		presence: Presence {
			events: presence_updates
				.into_iter()
				.flat_map(IntoIterator::into_iter)
				.map(|(sender, content)| PresenceEvent { content, sender })
				.map(|ref event| Raw::new(event))
				// serialization failures are silently dropped
				.filter_map(Result::ok)
				.collect(),
		},
		rooms: Rooms {
			leave: left_rooms,
			join: joined_rooms,
			invite: invited_rooms,
			knock: knocked_rooms,
		},
		to_device: ToDevice { events: to_device_events },
	};

	Ok(response)
}
|
||||
|
||||
#[tracing::instrument(name = "presence", level = "debug", skip_all)]
|
||||
async fn process_presence_updates(
|
||||
services: &Services,
|
||||
last_sync_end_count: Option<u64>,
|
||||
syncing_user: &UserId,
|
||||
) -> PresenceUpdates {
|
||||
services
|
||||
.presence
|
||||
.presence_since(last_sync_end_count.unwrap_or(0)) // send all presences on initial sync
|
||||
.filter(|(user_id, ..)| {
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.user_sees_user(syncing_user, user_id)
|
||||
})
|
||||
.filter_map(|(user_id, _, presence_bytes)| {
|
||||
services
|
||||
.presence
|
||||
.from_json_bytes_to_event(presence_bytes, user_id)
|
||||
.map_ok(move |event| (user_id, event))
|
||||
.ok()
|
||||
})
|
||||
.map(|(user_id, event)| (user_id.to_owned(), event.content))
|
||||
.collect()
|
||||
.await
|
||||
}
|
||||
|
||||
/// Using the provided sync context and an iterator of user IDs in the
|
||||
/// `timeline`, return a HashSet of user IDs whose membership events should be
|
||||
/// sent to the client if lazy-loading is enabled.
|
||||
#[allow(clippy::let_and_return)]
|
||||
async fn prepare_lazily_loaded_members(
|
||||
services: &Services,
|
||||
sync_context: SyncContext<'_>,
|
||||
room_id: &RoomId,
|
||||
timeline_members: impl Iterator<Item = OwnedUserId>,
|
||||
) -> Option<MemberSet> {
|
||||
let lazy_loading_context = &sync_context.lazy_loading_context(room_id);
|
||||
|
||||
// reset lazy loading state on initial sync.
|
||||
// do this even if lazy loading is disabled so future lazy loads
|
||||
// will have the correct members.
|
||||
if sync_context.last_sync_end_count.is_none() {
|
||||
services
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.reset(lazy_loading_context)
|
||||
.await;
|
||||
}
|
||||
|
||||
// filter the input members through `retain_lazy_members`, which
|
||||
// contains the actual lazy loading logic.
|
||||
let lazily_loaded_members =
|
||||
OptionFuture::from(sync_context.lazy_loading_enabled().then(|| {
|
||||
services
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.retain_lazy_members(timeline_members.collect(), lazy_loading_context)
|
||||
}))
|
||||
.await;
|
||||
|
||||
lazily_loaded_members
|
||||
}
|
||||
280
src/api/client/sync/v3/state.rs
Normal file
280
src/api/client/sync/v3/state.rs
Normal file
@@ -0,0 +1,280 @@
|
||||
use std::{collections::BTreeSet, ops::ControlFlow};
|
||||
|
||||
use conduwuit::{
|
||||
Result, at, is_equal_to,
|
||||
matrix::{
|
||||
Event,
|
||||
pdu::{PduCount, PduEvent},
|
||||
},
|
||||
utils::{
|
||||
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
stream::{BroadbandExt, TryIgnore},
|
||||
},
|
||||
};
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
rooms::{lazy_loading::MemberSet, short::ShortStateHash},
|
||||
};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use itertools::Itertools;
|
||||
use ruma::{OwnedEventId, RoomId, UserId, events::StateEventType};
|
||||
use service::rooms::short::ShortEventId;
|
||||
use tracing::trace;
|
||||
|
||||
use crate::client::TimelinePdus;
|
||||
|
||||
/// Calculate the state events to include in an initial sync response.
|
||||
///
|
||||
/// If lazy-loading is enabled (`lazily_loaded_members` is Some), the returned
|
||||
/// Vec will include the membership events of exclusively the members in
|
||||
/// `lazily_loaded_members`.
|
||||
#[tracing::instrument(
|
||||
name = "initial",
|
||||
level = "trace",
|
||||
skip_all,
|
||||
fields(current_shortstatehash)
|
||||
)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(super) async fn build_state_initial(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
timeline_start_shortstatehash: ShortStateHash,
|
||||
lazily_loaded_members: Option<&MemberSet>,
|
||||
) -> Result<Vec<PduEvent>> {
|
||||
// load the keys and event IDs of the state events at the start of the timeline
|
||||
let (shortstatekeys, event_ids): (Vec<_>, Vec<_>) = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_full_ids(timeline_start_shortstatehash)
|
||||
.unzip()
|
||||
.await;
|
||||
|
||||
trace!("performing initial sync of {} state events", event_ids.len());
|
||||
|
||||
services
|
||||
.rooms
|
||||
.short
|
||||
// look up the full state keys
|
||||
.multi_get_statekey_from_short(shortstatekeys.into_iter().stream())
|
||||
.zip(event_ids.into_iter().stream())
|
||||
.ready_filter_map(|item| Some((item.0.ok()?, item.1)))
|
||||
.ready_filter_map(|((event_type, state_key), event_id)| {
|
||||
if let Some(lazily_loaded_members) = lazily_loaded_members {
|
||||
/*
|
||||
if lazy loading is enabled, filter out membership events which aren't for a user
|
||||
included in `lazily_loaded_members` or for the user requesting the sync.
|
||||
*/
|
||||
let event_is_redundant = event_type == StateEventType::RoomMember
|
||||
&& state_key.as_str().try_into().is_ok_and(|user_id: &UserId| {
|
||||
sender_user != user_id && !lazily_loaded_members.contains(user_id)
|
||||
});
|
||||
|
||||
event_is_redundant.or_some(event_id)
|
||||
} else {
|
||||
Some(event_id)
|
||||
}
|
||||
})
|
||||
.broad_filter_map(|event_id: OwnedEventId| async move {
|
||||
services.rooms.timeline.get_pdu(&event_id).await.ok()
|
||||
})
|
||||
.collect()
|
||||
.map(Ok)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Calculate the state events to include in an incremental sync response.
///
/// If lazy-loading is enabled (`lazily_loaded_members` is Some), the returned
/// Vec will include the membership events of all the members in
/// `lazily_loaded_members`.
#[tracing::instrument(name = "incremental", level = "trace", skip_all)]
#[allow(clippy::too_many_arguments)]
pub(super) async fn build_state_incremental<'a>(
	services: &Services,
	sender_user: &'a UserId,
	room_id: &RoomId,
	last_sync_end_count: PduCount,
	last_sync_end_shortstatehash: ShortStateHash,
	timeline_start_shortstatehash: ShortStateHash,
	timeline_end_shortstatehash: ShortStateHash,
	timeline: &TimelinePdus,
	lazily_loaded_members: Option<&'a MemberSet>,
) -> Result<Vec<PduEvent>> {
	/*
	NB: a limited sync is one where `timeline.limited == true`. Synapse calls this a "gappy" sync internally.

	The algorithm implemented in this function is, currently, quite different from the algorithm vaguely described
	by the Matrix specification. This is because the specification's description of the `state` property does not accurately
	reflect how Synapse behaves, and therefore how client SDKs behave. Notable differences include:
	1. We do not compute the delta using the naive approach of "every state event from the end of the last sync
	up to the start of this sync's timeline". see below for details.
	2. If lazy-loading is enabled, we include lazily-loaded membership events. The specific users to include are determined
	elsewhere and supplied to this function in the `lazily_loaded_members` parameter.
	*/

	/*
	the `state` property of an incremental sync which isn't limited are _usually_ empty.
	(note: the specification says that the `state` property is _always_ empty for limited syncs, which is incorrect.)
	however, if an event in the timeline (`timeline.pdus`) merges a split in the room's DAG (i.e. has multiple `prev_events`),
	the state at the _end_ of the timeline may include state events which were merged in and don't exist in the state
	at the _start_ of the timeline. because this is uncommon, we check here to see if any events in the timeline
	merged a split in the DAG.

	see: https://github.com/element-hq/synapse/issues/16941
	*/

	// an empty timeline is trivially linear; otherwise walk the PDUs and
	// verify each one's prev_events point only at its predecessor
	let timeline_is_linear = timeline.pdus.is_empty() || {
		// NOTE(review): the `expect` below panics if no PDU exists at or
		// before `last_sync_end_count + 1` — confirm a valid `since` token
		// guarantees at least one preceding PDU in this room
		let last_pdu_of_last_sync = services
			.rooms
			.timeline
			.pdus_rev(Some(sender_user), room_id, Some(last_sync_end_count.saturating_add(1)))
			.boxed()
			.next()
			.await
			.transpose()
			.expect("last sync should have had some PDUs")
			.map(at!(1));

		// make sure the prev_events of each pdu in the timeline refer only to the
		// previous pdu
		timeline
			.pdus
			.iter()
			.try_fold(last_pdu_of_last_sync.map(|pdu| pdu.event_id), |prev_event_id, (_, pdu)| {
				if let Ok(pdu_prev_event_id) = pdu.prev_events.iter().exactly_one() {
					// a None prev_event_id (unknown predecessor) is accepted
					if prev_event_id
						.as_ref()
						.is_none_or(is_equal_to!(pdu_prev_event_id))
					{
						return ControlFlow::Continue(Some(pdu_prev_event_id.to_owned()));
					}
				}

				trace!(
					"pdu {:?} has split prev_events (expected {:?}): {:?}",
					pdu.event_id, prev_event_id, pdu.prev_events
				);
				ControlFlow::Break(())
			})
			.is_continue()
	};

	if timeline_is_linear && !timeline.limited {
		// if there are no splits in the DAG and the timeline isn't limited, then
		// `state` will always be empty unless lazy loading is enabled.

		if let Some(lazily_loaded_members) = lazily_loaded_members {
			if !timeline.pdus.is_empty() {
				// lazy loading is enabled, so we return the membership events which were
				// requested by the caller.
				let lazy_membership_events: Vec<_> = lazily_loaded_members
					.iter()
					.stream()
					.broad_filter_map(|user_id| async move {
						// the syncing user's own membership is never lazily loaded
						if user_id == sender_user {
							return None;
						}

						services
							.rooms
							.state_accessor
							.state_get(
								timeline_start_shortstatehash,
								&StateEventType::RoomMember,
								user_id.as_str(),
							)
							.ok()
							.await
					})
					.collect()
					.await;

				if !lazy_membership_events.is_empty() {
					trace!(
						"syncing lazy membership events for members: {:?}",
						lazy_membership_events
							.iter()
							// these PDUs were fetched as RoomMember state, so a
							// state_key is presumed present — unwrap is safe here
							.map(|pdu| pdu.state_key().unwrap())
							.collect::<Vec<_>>()
					);
				}
				return Ok(lazy_membership_events);
			}
		}

		// lazy loading is disabled, `state` is empty.
		return Ok(vec![]);
	}

	/*
	at this point, either the timeline is `limited` or the DAG has a split in it. this necessitates
	computing the incremental state (which may be empty).

	NOTE: this code path does not use the `lazy_membership_events` parameter. any changes to membership will be included
	in the incremental state. therefore, the incremental state may include "redundant" membership events,
	which we do not filter out because A. the spec forbids lazy-load filtering if the timeline is `limited`,
	and B. DAG splits which require sending extra membership state events are (probably) uncommon enough that
	the performance penalty is acceptable.
	*/

	trace!(?timeline_is_linear, ?timeline.limited, "computing state for incremental sync");

	// fetch the shorteventids of state events in the timeline
	let state_events_in_timeline: BTreeSet<ShortEventId> = services
		.rooms
		.short
		.multi_get_or_create_shorteventid(timeline.pdus.iter().filter_map(|(_, pdu)| {
			if pdu.state_key().is_some() {
				Some(pdu.event_id.as_ref())
			} else {
				None
			}
		}))
		.collect()
		.await;

	trace!("{} state events in timeline", state_events_in_timeline.len());

	/*
	fetch the state events which were added since the last sync.

	specifically we fetch the difference between the state at the last sync and the state at the _end_
	of the timeline, and then we filter out state events in the timeline itself using the shorteventids we fetched.
	this is necessary to account for splits in the DAG, as explained above.
	*/
	let state_diff = services
		.rooms
		.short
		.multi_get_eventid_from_short::<'_, OwnedEventId, _>(
			services
				.rooms
				.state_accessor
				.state_added((last_sync_end_shortstatehash, timeline_end_shortstatehash))
				.await?
				.stream()
				.ready_filter_map(|(_, shorteventid)| {
					// events already delivered in the timeline are excluded
					if state_events_in_timeline.contains(&shorteventid) {
						None
					} else {
						Some(shorteventid)
					}
				}),
		)
		.ignore_err();

	// finally, fetch the PDU contents and collect them into a vec
	let state_diff_pdus = state_diff
		.broad_filter_map(|event_id| async move {
			services
				.rooms
				.timeline
				.get_non_outlier_pdu(&event_id)
				.await
				.ok()
		})
		.collect::<Vec<_>>()
		.await;

	trace!(?state_diff_pdus, "collected state PDUs for incremental sync");
	Ok(state_diff_pdus)
}
|
||||
@@ -1,848 +0,0 @@
|
||||
use std::{
|
||||
cmp::{self, Ordering},
|
||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Error, Event, PduCount, Result, at, debug, error, extract_variant,
|
||||
matrix::TypeStateKey,
|
||||
utils::{
|
||||
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
|
||||
stream::WidebandExt,
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
rooms::read_receipt::pack_receipts,
|
||||
sync::{into_db_key, into_snake_key},
|
||||
};
|
||||
use futures::{FutureExt, StreamExt, TryFutureExt};
|
||||
use ruma::{
|
||||
MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
|
||||
api::client::sync::sync_events::{
|
||||
self, DeviceLists, UnreadNotificationsCount,
|
||||
v4::{SlidingOp, SlidingSyncRoomHero},
|
||||
},
|
||||
directory::RoomTypeFilter,
|
||||
events::{
|
||||
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType,
|
||||
TimelineEventType::*,
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
serde::Raw,
|
||||
uint,
|
||||
};
|
||||
|
||||
use super::{load_timeline, share_encrypted_room};
|
||||
use crate::{
|
||||
Ruma,
|
||||
client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite},
|
||||
};
|
||||
|
||||
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
|
||||
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
|
||||
///
|
||||
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
|
||||
pub(crate) async fn sync_events_v4_route(
|
||||
State(services): State<crate::State>,
|
||||
body: Ruma<sync_events::v4::Request>,
|
||||
) -> Result<sync_events::v4::Response> {
|
||||
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
let mut body = body.body;
|
||||
|
||||
// Setup watchers, so if there's no response, we can wait for them
|
||||
let watcher = services.sync.watch(sender_user, sender_device);
|
||||
|
||||
let next_batch = services.globals.next_count()?;
|
||||
|
||||
let conn_id = body
|
||||
.conn_id
|
||||
.clone()
|
||||
.unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned());
|
||||
|
||||
let globalsince = body
|
||||
.pos
|
||||
.as_ref()
|
||||
.and_then(|string| string.parse().ok())
|
||||
.unwrap_or(0);
|
||||
|
||||
let db_key = into_db_key(sender_user, sender_device, conn_id.clone());
|
||||
if globalsince != 0 && !services.sync.remembered(&db_key) {
|
||||
debug!("Restarting sync stream because it was gone from the database");
|
||||
return Err!(Request(UnknownPos("Connection data lost since last time")));
|
||||
}
|
||||
|
||||
if globalsince == 0 {
|
||||
services.sync.forget_sync_request_connection(&db_key);
|
||||
}
|
||||
|
||||
// Get sticky parameters from cache
|
||||
let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone());
|
||||
let known_rooms = services
|
||||
.sync
|
||||
.update_sync_request_with_cache(&snake_key, &mut body);
|
||||
|
||||
let all_joined_rooms: Vec<_> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_joined(sender_user)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
let all_invited_rooms: Vec<_> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_invited(sender_user)
|
||||
.wide_filter_map(async |(room_id, invite_state)| {
|
||||
if is_ignored_invite(&services, sender_user, &room_id).await {
|
||||
None
|
||||
} else {
|
||||
Some((room_id, invite_state))
|
||||
}
|
||||
})
|
||||
.map(|r| r.0)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
let all_knocked_rooms: Vec<_> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.rooms_knocked(sender_user)
|
||||
.map(|r| r.0)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
let all_invited_rooms: Vec<&RoomId> = all_invited_rooms.iter().map(AsRef::as_ref).collect();
|
||||
let all_knocked_rooms: Vec<&RoomId> = all_knocked_rooms.iter().map(AsRef::as_ref).collect();
|
||||
|
||||
let all_rooms: Vec<&RoomId> = all_joined_rooms
|
||||
.iter()
|
||||
.map(AsRef::as_ref)
|
||||
.chain(all_invited_rooms.iter().map(AsRef::as_ref))
|
||||
.chain(all_knocked_rooms.iter().map(AsRef::as_ref))
|
||||
.collect();
|
||||
|
||||
let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect();
|
||||
let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect();
|
||||
|
||||
if body.extensions.to_device.enabled.unwrap_or(false) {
|
||||
services
|
||||
.users
|
||||
.remove_to_device_events(sender_user, sender_device, globalsince)
|
||||
.await;
|
||||
}
|
||||
|
||||
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
|
||||
let mut device_list_changes = HashSet::new();
|
||||
let mut device_list_left = HashSet::new();
|
||||
|
||||
let mut receipts = sync_events::v4::Receipts { rooms: BTreeMap::new() };
|
||||
|
||||
let mut account_data = sync_events::v4::AccountData {
|
||||
global: Vec::new(),
|
||||
rooms: BTreeMap::new(),
|
||||
};
|
||||
if body.extensions.account_data.enabled.unwrap_or(false) {
|
||||
account_data.global = services
|
||||
.account_data
|
||||
.changes_since(None, sender_user, globalsince, Some(next_batch))
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
if let Some(rooms) = body.extensions.account_data.rooms {
|
||||
for room in rooms {
|
||||
account_data.rooms.insert(
|
||||
room.clone(),
|
||||
services
|
||||
.account_data
|
||||
.changes_since(Some(&room), sender_user, globalsince, Some(next_batch))
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
||||
.collect()
|
||||
.await,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if body.extensions.e2ee.enabled.unwrap_or(false) {
|
||||
// Look for device list updates of this account
|
||||
device_list_changes.extend(
|
||||
services
|
||||
.users
|
||||
.keys_changed(sender_user, globalsince, None)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
|
||||
for room_id in &all_joined_rooms {
|
||||
let room_id: &&RoomId = room_id;
|
||||
let Ok(current_shortstatehash) =
|
||||
services.rooms.state.get_room_shortstatehash(room_id).await
|
||||
else {
|
||||
error!("Room {room_id} has no state");
|
||||
continue;
|
||||
};
|
||||
|
||||
let since_shortstatehash = services
|
||||
.rooms
|
||||
.user
|
||||
.get_token_shortstatehash(room_id, globalsince)
|
||||
.await
|
||||
.ok();
|
||||
|
||||
let encrypted_room = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")
|
||||
.await
|
||||
.is_ok();
|
||||
|
||||
if let Some(since_shortstatehash) = since_shortstatehash {
|
||||
// Skip if there are only timeline changes
|
||||
if since_shortstatehash == current_shortstatehash {
|
||||
continue;
|
||||
}
|
||||
|
||||
let since_encryption = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")
|
||||
.await;
|
||||
|
||||
let since_sender_member: Option<RoomMemberEventContent> = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_get_content(
|
||||
since_shortstatehash,
|
||||
&StateEventType::RoomMember,
|
||||
sender_user.as_str(),
|
||||
)
|
||||
.ok()
|
||||
.await;
|
||||
|
||||
let joined_since_last_sync = since_sender_member
|
||||
.as_ref()
|
||||
.is_none_or(|member| member.membership != MembershipState::Join);
|
||||
|
||||
let new_encrypted_room = encrypted_room && since_encryption.is_err();
|
||||
|
||||
if encrypted_room {
|
||||
let current_state_ids: HashMap<_, OwnedEventId> = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_full_ids(current_shortstatehash)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
let since_state_ids: HashMap<_, _> = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_full_ids(since_shortstatehash)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
for (key, id) in current_state_ids {
|
||||
if since_state_ids.get(&key) != Some(&id) {
|
||||
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
|
||||
error!("Pdu in state not found: {id}");
|
||||
continue;
|
||||
};
|
||||
if pdu.kind == RoomMember {
|
||||
if let Some(Ok(user_id)) =
|
||||
pdu.state_key.as_deref().map(UserId::parse)
|
||||
{
|
||||
if user_id == sender_user {
|
||||
continue;
|
||||
}
|
||||
|
||||
let content: RoomMemberEventContent = pdu.get_content()?;
|
||||
match content.membership {
|
||||
| MembershipState::Join => {
|
||||
// A new user joined an encrypted room
|
||||
if !share_encrypted_room(
|
||||
&services,
|
||||
sender_user,
|
||||
user_id,
|
||||
Some(room_id),
|
||||
)
|
||||
.await
|
||||
{
|
||||
device_list_changes.insert(user_id.to_owned());
|
||||
}
|
||||
},
|
||||
| MembershipState::Leave => {
|
||||
// Write down users that have left encrypted rooms we
|
||||
// are in
|
||||
left_encrypted_users.insert(user_id.to_owned());
|
||||
},
|
||||
| _ => {},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if joined_since_last_sync || new_encrypted_room {
|
||||
// If the user is in a new encrypted room, give them all joined users
|
||||
device_list_changes.extend(
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(room_id)
|
||||
// Don't send key updates from the sender to the sender
|
||||
.ready_filter(|&user_id| sender_user != user_id)
|
||||
// Only send keys if the sender doesn't share an encrypted room with the target
|
||||
// already
|
||||
.filter_map(|user_id| {
|
||||
share_encrypted_room(&services, sender_user, user_id, Some(room_id))
|
||||
.map(|res| res.or_some(user_id.to_owned()))
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Look for device list updates in this room
|
||||
device_list_changes.extend(
|
||||
services
|
||||
.users
|
||||
.room_keys_changed(room_id, globalsince, None)
|
||||
.map(|(user_id, _)| user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
);
|
||||
}
|
||||
|
||||
for user_id in left_encrypted_users {
|
||||
let dont_share_encrypted_room =
|
||||
!share_encrypted_room(&services, sender_user, &user_id, None).await;
|
||||
|
||||
// If the user doesn't share an encrypted room with the target anymore, we need
|
||||
// to tell them
|
||||
if dont_share_encrypted_room {
|
||||
device_list_left.insert(user_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut lists = BTreeMap::new();
|
||||
let mut todo_rooms: TodoRooms = BTreeMap::new(); // and required state
|
||||
|
||||
for (list_id, list) in &body.lists {
|
||||
let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
|
||||
| Some(true) => &all_invited_rooms,
|
||||
| Some(false) => &all_joined_rooms,
|
||||
| None => &all_rooms,
|
||||
};
|
||||
|
||||
let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
|
||||
| Some(filter) if filter.is_empty() => active_rooms.clone(),
|
||||
| Some(value) => filter_rooms(&services, active_rooms, &value, true).await,
|
||||
| None => active_rooms.clone(),
|
||||
};
|
||||
|
||||
let active_rooms = match list.filters.clone().map(|f| f.room_types) {
|
||||
| Some(filter) if filter.is_empty() => active_rooms.clone(),
|
||||
| Some(value) => filter_rooms(&services, &active_rooms, &value, false).await,
|
||||
| None => active_rooms,
|
||||
};
|
||||
|
||||
let mut new_known_rooms: BTreeSet<OwnedRoomId> = BTreeSet::new();
|
||||
|
||||
let ranges = list.ranges.clone();
|
||||
lists.insert(list_id.clone(), sync_events::v4::SyncList {
|
||||
ops: ranges
|
||||
.into_iter()
|
||||
.map(|mut r| {
|
||||
r.0 = r.0.clamp(
|
||||
uint!(0),
|
||||
UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
|
||||
);
|
||||
r.1 = r.1.clamp(
|
||||
r.0,
|
||||
UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
|
||||
);
|
||||
|
||||
let room_ids = if !active_rooms.is_empty() {
|
||||
active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
new_known_rooms.extend(room_ids.clone().into_iter().map(ToOwned::to_owned));
|
||||
for room_id in &room_ids {
|
||||
let todo_room = todo_rooms.entry((*room_id).to_owned()).or_insert((
|
||||
BTreeSet::new(),
|
||||
0_usize,
|
||||
u64::MAX,
|
||||
));
|
||||
|
||||
let limit: usize = list
|
||||
.room_details
|
||||
.timeline_limit
|
||||
.map(u64::from)
|
||||
.map_or(10, usize_from_u64_truncated)
|
||||
.min(100);
|
||||
|
||||
todo_room.0.extend(
|
||||
list.room_details
|
||||
.required_state
|
||||
.iter()
|
||||
.map(|(ty, sk)| (ty.clone(), sk.as_str().into())),
|
||||
);
|
||||
|
||||
todo_room.1 = todo_room.1.max(limit);
|
||||
// 0 means unknown because it got out of date
|
||||
todo_room.2 = todo_room.2.min(
|
||||
known_rooms
|
||||
.get(list_id.as_str())
|
||||
.and_then(|k| k.get(*room_id))
|
||||
.copied()
|
||||
.unwrap_or(0),
|
||||
);
|
||||
}
|
||||
sync_events::v4::SyncOp {
|
||||
op: SlidingOp::Sync,
|
||||
range: Some(r),
|
||||
index: None,
|
||||
room_ids: room_ids.into_iter().map(ToOwned::to_owned).collect(),
|
||||
room_id: None,
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
count: ruma_from_usize(active_rooms.len()),
|
||||
});
|
||||
|
||||
if let Some(conn_id) = &body.conn_id {
|
||||
let db_key = into_db_key(sender_user, sender_device, conn_id);
|
||||
services.sync.update_sync_known_rooms(
|
||||
&db_key,
|
||||
list_id.clone(),
|
||||
new_known_rooms,
|
||||
globalsince,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut known_subscription_rooms = BTreeSet::new();
|
||||
for (room_id, room) in &body.room_subscriptions {
|
||||
if !services.rooms.metadata.exists(room_id).await
|
||||
|| services.rooms.metadata.is_disabled(room_id).await
|
||||
|| services.rooms.metadata.is_banned(room_id).await
|
||||
{
|
||||
continue;
|
||||
}
|
||||
let todo_room =
|
||||
todo_rooms
|
||||
.entry(room_id.clone())
|
||||
.or_insert((BTreeSet::new(), 0_usize, u64::MAX));
|
||||
|
||||
let limit: usize = room
|
||||
.timeline_limit
|
||||
.map(u64::from)
|
||||
.map_or(10, usize_from_u64_truncated)
|
||||
.min(100);
|
||||
|
||||
todo_room.0.extend(
|
||||
room.required_state
|
||||
.iter()
|
||||
.map(|(ty, sk)| (ty.clone(), sk.as_str().into())),
|
||||
);
|
||||
todo_room.1 = todo_room.1.max(limit);
|
||||
// 0 means unknown because it got out of date
|
||||
todo_room.2 = todo_room.2.min(
|
||||
known_rooms
|
||||
.get("subscriptions")
|
||||
.and_then(|k| k.get(room_id))
|
||||
.copied()
|
||||
.unwrap_or(0),
|
||||
);
|
||||
known_subscription_rooms.insert(room_id.clone());
|
||||
}
|
||||
|
||||
for r in body.unsubscribe_rooms {
|
||||
known_subscription_rooms.remove(&r);
|
||||
body.room_subscriptions.remove(&r);
|
||||
}
|
||||
|
||||
if let Some(conn_id) = &body.conn_id {
|
||||
let db_key = into_db_key(sender_user, sender_device, conn_id);
|
||||
services.sync.update_sync_known_rooms(
|
||||
&db_key,
|
||||
"subscriptions".to_owned(),
|
||||
known_subscription_rooms,
|
||||
globalsince,
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(conn_id) = body.conn_id.clone() {
|
||||
let db_key = into_db_key(sender_user, sender_device, conn_id);
|
||||
services
|
||||
.sync
|
||||
.update_sync_subscriptions(&db_key, body.room_subscriptions);
|
||||
}
|
||||
|
||||
let mut rooms = BTreeMap::new();
|
||||
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
|
||||
let roomsincecount = PduCount::Normal(*roomsince);
|
||||
|
||||
let mut timestamp: Option<_> = None;
|
||||
let mut invite_state = None;
|
||||
let (timeline_pdus, limited);
|
||||
let new_room_id: &RoomId = (*room_id).as_ref();
|
||||
if all_invited_rooms.contains(&new_room_id) {
|
||||
// TODO: figure out a timestamp we can use for remote invites
|
||||
invite_state = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.invite_state(sender_user, room_id)
|
||||
.await
|
||||
.ok();
|
||||
|
||||
(timeline_pdus, limited) = (Vec::new(), true);
|
||||
} else {
|
||||
(timeline_pdus, limited) = match load_timeline(
|
||||
&services,
|
||||
sender_user,
|
||||
room_id,
|
||||
roomsincecount,
|
||||
None,
|
||||
*timeline_limit,
|
||||
)
|
||||
.await
|
||||
{
|
||||
| Ok(value) => value,
|
||||
| Err(err) => {
|
||||
warn!("Encountered missing timeline in {}, error {}", room_id, err);
|
||||
continue;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
account_data.rooms.insert(
|
||||
room_id.to_owned(),
|
||||
services
|
||||
.account_data
|
||||
.changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch))
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
||||
.collect()
|
||||
.await,
|
||||
);
|
||||
|
||||
let last_privateread_update = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.last_privateread_update(sender_user, room_id)
|
||||
.await > *roomsince;
|
||||
|
||||
let private_read_event = if last_privateread_update {
|
||||
services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.private_read_get(room_id, sender_user)
|
||||
.await
|
||||
.ok()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut vector: Vec<Raw<AnySyncEphemeralRoomEvent>> = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.readreceipts_since(room_id, *roomsince)
|
||||
.filter_map(|(read_user, _ts, v)| async move {
|
||||
services
|
||||
.users
|
||||
.user_is_ignored(read_user, sender_user)
|
||||
.await
|
||||
.or_some(v)
|
||||
})
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
if let Some(private_read_event) = private_read_event {
|
||||
vector.push(private_read_event);
|
||||
}
|
||||
|
||||
let receipt_size = vector.len();
|
||||
receipts
|
||||
.rooms
|
||||
.insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter())));
|
||||
|
||||
if roomsince != &0
|
||||
&& timeline_pdus.is_empty()
|
||||
&& account_data.rooms.get(room_id).is_some_and(Vec::is_empty)
|
||||
&& receipt_size == 0
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let prev_batch = timeline_pdus
|
||||
.first()
|
||||
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
|
||||
Ok(Some(match pdu_count {
|
||||
| PduCount::Backfilled(_) => {
|
||||
error!("timeline in backfill state?!");
|
||||
"0".to_owned()
|
||||
},
|
||||
| PduCount::Normal(c) => c.to_string(),
|
||||
}))
|
||||
})?
|
||||
.or_else(|| {
|
||||
if roomsince != &0 {
|
||||
Some(roomsince.to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
let room_events: Vec<_> = timeline_pdus
|
||||
.iter()
|
||||
.stream()
|
||||
.filter_map(|item| ignored_filter(&services, item.clone(), sender_user))
|
||||
.map(at!(1))
|
||||
.map(Event::into_format)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
for (_, pdu) in timeline_pdus {
|
||||
let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts);
|
||||
if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok()
|
||||
&& timestamp.is_none_or(|time| time <= ts)
|
||||
{
|
||||
timestamp = Some(ts);
|
||||
}
|
||||
}
|
||||
|
||||
let required_state = required_state_request
|
||||
.iter()
|
||||
.stream()
|
||||
.filter_map(|state| async move {
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(room_id, &state.0, &state.1)
|
||||
.await
|
||||
.map(Event::into_format)
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
// Heroes
|
||||
let heroes: Vec<_> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(room_id)
|
||||
.ready_filter(|&member| member != sender_user)
|
||||
.filter_map(|user_id| {
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(room_id, user_id)
|
||||
.map_ok(|memberevent| SlidingSyncRoomHero {
|
||||
user_id: user_id.into(),
|
||||
name: memberevent.displayname,
|
||||
avatar: memberevent.avatar_url,
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.take(5)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
let name = match heroes.len().cmp(&(1_usize)) {
|
||||
| Ordering::Greater => {
|
||||
let firsts = heroes[1..]
|
||||
.iter()
|
||||
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
let last = heroes[0]
|
||||
.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| heroes[0].user_id.to_string());
|
||||
|
||||
Some(format!("{firsts} and {last}"))
|
||||
},
|
||||
| Ordering::Equal => Some(
|
||||
heroes[0]
|
||||
.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| heroes[0].user_id.to_string()),
|
||||
),
|
||||
| Ordering::Less => None,
|
||||
};
|
||||
|
||||
let heroes_avatar = if heroes.len() == 1 {
|
||||
heroes[0].avatar.clone()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
rooms.insert(room_id.clone(), sync_events::v4::SlidingSyncRoom {
|
||||
name: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_name(room_id)
|
||||
.await
|
||||
.ok()
|
||||
.or(name),
|
||||
avatar: match heroes_avatar {
|
||||
| Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar),
|
||||
| _ => match services.rooms.state_accessor.get_avatar(room_id).await {
|
||||
| ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
|
||||
| ruma::JsOption::Null => ruma::JsOption::Null,
|
||||
| ruma::JsOption::Undefined => ruma::JsOption::Undefined,
|
||||
},
|
||||
},
|
||||
initial: Some(roomsince == &0),
|
||||
is_dm: None,
|
||||
invite_state,
|
||||
unread_notifications: UnreadNotificationsCount {
|
||||
highlight_count: Some(
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.highlight_count(sender_user, room_id)
|
||||
.await
|
||||
.try_into()
|
||||
.expect("notification count can't go that high"),
|
||||
),
|
||||
notification_count: Some(
|
||||
services
|
||||
.rooms
|
||||
.user
|
||||
.notification_count(sender_user, room_id)
|
||||
.await
|
||||
.try_into()
|
||||
.expect("notification count can't go that high"),
|
||||
),
|
||||
},
|
||||
timeline: room_events,
|
||||
required_state,
|
||||
prev_batch,
|
||||
limited,
|
||||
joined_count: Some(
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(room_id)
|
||||
.await
|
||||
.unwrap_or(0)
|
||||
.try_into()
|
||||
.unwrap_or_else(|_| uint!(0)),
|
||||
),
|
||||
invited_count: Some(
|
||||
services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_invited_count(room_id)
|
||||
.await
|
||||
.unwrap_or(0)
|
||||
.try_into()
|
||||
.unwrap_or_else(|_| uint!(0)),
|
||||
),
|
||||
num_live: None, // Count events in timeline greater than global sync counter
|
||||
timestamp,
|
||||
heroes: Some(heroes),
|
||||
});
|
||||
}
|
||||
|
||||
if rooms.iter().all(|(id, r)| {
|
||||
r.timeline.is_empty() && r.required_state.is_empty() && !receipts.rooms.contains_key(id)
|
||||
}) {
|
||||
// Hang a few seconds so requests are not spammed
|
||||
// Stop hanging if new info arrives
|
||||
let default = Duration::from_secs(30);
|
||||
let duration = cmp::min(body.timeout.unwrap_or(default), default);
|
||||
_ = tokio::time::timeout(duration, watcher).await;
|
||||
}
|
||||
|
||||
Ok(sync_events::v4::Response {
|
||||
initial: globalsince == 0,
|
||||
txn_id: body.txn_id.clone(),
|
||||
pos: next_batch.to_string(),
|
||||
lists,
|
||||
rooms,
|
||||
extensions: sync_events::v4::Extensions {
|
||||
to_device: if body.extensions.to_device.enabled.unwrap_or(false) {
|
||||
Some(sync_events::v4::ToDevice {
|
||||
events: services
|
||||
.users
|
||||
.get_to_device_events(
|
||||
sender_user,
|
||||
sender_device,
|
||||
Some(globalsince),
|
||||
Some(next_batch),
|
||||
)
|
||||
.collect()
|
||||
.await,
|
||||
next_batch: next_batch.to_string(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
},
|
||||
e2ee: sync_events::v4::E2EE {
|
||||
device_lists: DeviceLists {
|
||||
changed: device_list_changes.into_iter().collect(),
|
||||
left: device_list_left.into_iter().collect(),
|
||||
},
|
||||
device_one_time_keys_count: services
|
||||
.users
|
||||
.count_one_time_keys(sender_user, sender_device)
|
||||
.await,
|
||||
// Fallback keys are not yet supported
|
||||
device_unused_fallback_key_types: None,
|
||||
},
|
||||
account_data,
|
||||
receipts,
|
||||
typing: sync_events::v4::Typing { rooms: BTreeMap::new() },
|
||||
},
|
||||
delta_token: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn filter_rooms<'a>(
|
||||
services: &Services,
|
||||
rooms: &[&'a RoomId],
|
||||
filter: &[RoomTypeFilter],
|
||||
negate: bool,
|
||||
) -> Vec<&'a RoomId> {
|
||||
rooms
|
||||
.iter()
|
||||
.stream()
|
||||
.filter_map(|r| async move {
|
||||
let room_type = services.rooms.state_accessor.get_room_type(r).await;
|
||||
|
||||
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let room_type_filter = RoomTypeFilter::from(room_type.ok());
|
||||
|
||||
let include = if negate {
|
||||
!filter.contains(&room_type_filter)
|
||||
} else {
|
||||
filter.is_empty() || filter.contains(&room_type_filter)
|
||||
};
|
||||
|
||||
include.then_some(r)
|
||||
})
|
||||
.collect()
|
||||
.await
|
||||
}
|
||||
@@ -1,11 +1,12 @@
|
||||
use std::{
|
||||
cmp::{self, Ordering},
|
||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque},
|
||||
ops::Deref,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Error, Result, at, error, extract_variant, is_equal_to,
|
||||
matrix::{Event, TypeStateKey, pdu::PduCount},
|
||||
@@ -31,6 +32,7 @@
|
||||
events::{
|
||||
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType,
|
||||
room::member::{MembershipState, RoomMemberEventContent},
|
||||
typing::TypingEventContent,
|
||||
},
|
||||
serde::Raw,
|
||||
uint,
|
||||
@@ -39,7 +41,9 @@
|
||||
use super::share_encrypted_room;
|
||||
use crate::{
|
||||
Ruma,
|
||||
client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite, sync::load_timeline},
|
||||
client::{
|
||||
DEFAULT_BUMP_TYPES, TimelinePdus, ignored_filter, is_ignored_invite, sync::load_timeline,
|
||||
},
|
||||
};
|
||||
|
||||
type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request);
|
||||
@@ -58,11 +62,18 @@
|
||||
/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186
|
||||
pub(crate) async fn sync_events_v5_route(
|
||||
State(ref services): State<crate::State>,
|
||||
InsecureClientIp(client_ip): InsecureClientIp,
|
||||
body: Ruma<sync_events::v5::Request>,
|
||||
) -> Result<sync_events::v5::Response> {
|
||||
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, Some(sender_device), client_ip)
|
||||
.await;
|
||||
|
||||
let mut body = body.body;
|
||||
|
||||
// Setup watchers, so if there's no response, we can wait for them
|
||||
@@ -210,6 +221,9 @@ pub(crate) async fn sync_events_v5_route(
|
||||
_ = tokio::time::timeout(duration, watcher).await;
|
||||
}
|
||||
|
||||
let typing = collect_typing_events(services, sender_user, &body, &todo_rooms).await?;
|
||||
response.extensions.typing = typing;
|
||||
|
||||
trace!(
|
||||
rooms = ?response.rooms.len(),
|
||||
account_data = ?response.extensions.account_data.rooms.len(),
|
||||
@@ -293,6 +307,8 @@ async fn handle_lists<'a, Rooms, AllRooms>(
|
||||
Rooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
|
||||
AllRooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
|
||||
{
|
||||
// TODO MSC4186: Implement remaining list filters: is_dm, is_encrypted,
|
||||
// room_types.
|
||||
for (list_id, list) in &body.lists {
|
||||
let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) {
|
||||
| None => all_rooms.clone().collect(),
|
||||
@@ -409,13 +425,13 @@ async fn process_rooms<'a, Rooms>(
|
||||
.await
|
||||
.ok();
|
||||
|
||||
(timeline_pdus, limited) = (Vec::new(), true);
|
||||
(timeline_pdus, limited) = (VecDeque::new(), true);
|
||||
} else {
|
||||
(timeline_pdus, limited) = match load_timeline(
|
||||
TimelinePdus { pdus: timeline_pdus, limited } = match load_timeline(
|
||||
services,
|
||||
sender_user,
|
||||
room_id,
|
||||
roomsincecount,
|
||||
Some(roomsincecount),
|
||||
Some(PduCount::from(next_batch)),
|
||||
*timeline_limit,
|
||||
)
|
||||
@@ -434,7 +450,7 @@ async fn process_rooms<'a, Rooms>(
|
||||
room_id.to_owned(),
|
||||
services
|
||||
.account_data
|
||||
.changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch))
|
||||
.changes_since(Some(room_id), sender_user, Some(*roomsince), Some(next_batch))
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
||||
.collect()
|
||||
.await,
|
||||
@@ -460,11 +476,11 @@ async fn process_rooms<'a, Rooms>(
|
||||
let mut receipts: Vec<Raw<AnySyncEphemeralRoomEvent>> = services
|
||||
.rooms
|
||||
.read_receipt
|
||||
.readreceipts_since(room_id, *roomsince)
|
||||
.readreceipts_since(room_id, Some(*roomsince))
|
||||
.filter_map(|(read_user, _ts, v)| async move {
|
||||
services
|
||||
.users
|
||||
.user_is_ignored(read_user, sender_user)
|
||||
.user_is_ignored(&read_user, sender_user)
|
||||
.await
|
||||
.or_some(v)
|
||||
})
|
||||
@@ -499,7 +515,7 @@ async fn process_rooms<'a, Rooms>(
|
||||
}
|
||||
|
||||
let prev_batch = timeline_pdus
|
||||
.first()
|
||||
.front()
|
||||
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
|
||||
Ok(Some(match pdu_count {
|
||||
| PduCount::Backfilled(_) => {
|
||||
@@ -672,6 +688,62 @@ async fn process_rooms<'a, Rooms>(
|
||||
}
|
||||
Ok(rooms)
|
||||
}
|
||||
|
||||
async fn collect_typing_events(
|
||||
services: &Services,
|
||||
sender_user: &UserId,
|
||||
body: &sync_events::v5::Request,
|
||||
todo_rooms: &TodoRooms,
|
||||
) -> Result<sync_events::v5::response::Typing> {
|
||||
if !body.extensions.typing.enabled.unwrap_or(false) {
|
||||
return Ok(sync_events::v5::response::Typing::default());
|
||||
}
|
||||
let rooms: Vec<_> = body.extensions.typing.rooms.clone().unwrap_or_else(|| {
|
||||
body.room_subscriptions
|
||||
.keys()
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
});
|
||||
let lists: Vec<_> = body
|
||||
.extensions
|
||||
.typing
|
||||
.lists
|
||||
.clone()
|
||||
.unwrap_or_else(|| body.lists.keys().map(ToOwned::to_owned).collect::<Vec<_>>());
|
||||
|
||||
if rooms.is_empty() && lists.is_empty() {
|
||||
return Ok(sync_events::v5::response::Typing::default());
|
||||
}
|
||||
|
||||
let mut typing_response = sync_events::v5::response::Typing::default();
|
||||
for (room_id, (_, _, roomsince)) in todo_rooms {
|
||||
if services.rooms.typing.last_typing_update(room_id).await? <= *roomsince {
|
||||
continue;
|
||||
}
|
||||
|
||||
match services
|
||||
.rooms
|
||||
.typing
|
||||
.typing_users_for_user(room_id, sender_user)
|
||||
.await
|
||||
{
|
||||
| Ok(typing_users) => {
|
||||
typing_response.rooms.insert(
|
||||
room_id.to_owned(), // Already OwnedRoomId
|
||||
Raw::new(&sync_events::v5::response::SyncTypingEvent {
|
||||
content: TypingEventContent::new(typing_users),
|
||||
})?,
|
||||
);
|
||||
},
|
||||
| Err(e) => {
|
||||
warn!(%room_id, "Failed to get typing events for room: {}", e);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(typing_response)
|
||||
}
|
||||
|
||||
async fn collect_account_data(
|
||||
services: &Services,
|
||||
(sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request),
|
||||
@@ -687,7 +759,7 @@ async fn collect_account_data(
|
||||
|
||||
account_data.global = services
|
||||
.account_data
|
||||
.changes_since(None, sender_user, globalsince, None)
|
||||
.changes_since(None, sender_user, Some(globalsince), None)
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
|
||||
.collect()
|
||||
.await;
|
||||
@@ -698,7 +770,7 @@ async fn collect_account_data(
|
||||
room.clone(),
|
||||
services
|
||||
.account_data
|
||||
.changes_since(Some(room), sender_user, globalsince, None)
|
||||
.changes_since(Some(room), sender_user, Some(globalsince), None)
|
||||
.ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
||||
.collect()
|
||||
.await,
|
||||
@@ -732,7 +804,7 @@ async fn collect_e2ee<'a, Rooms>(
|
||||
device_list_changes.extend(
|
||||
services
|
||||
.users
|
||||
.keys_changed(sender_user, globalsince, None)
|
||||
.keys_changed(sender_user, Some(globalsince), None)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
.await,
|
||||
@@ -868,7 +940,7 @@ async fn collect_e2ee<'a, Rooms>(
|
||||
device_list_changes.extend(
|
||||
services
|
||||
.users
|
||||
.room_keys_changed(room_id, globalsince, None)
|
||||
.room_keys_changed(room_id, Some(globalsince), None)
|
||||
.map(|(user_id, _)| user_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<_>>()
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, Result, utils, utils::math::Tried};
|
||||
use ruma::api::client::typing::create_typing_event;
|
||||
|
||||
@@ -9,10 +10,15 @@
|
||||
/// Sets the typing state of the sender user.
|
||||
pub(crate) async fn create_typing_event_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
body: Ruma<create_typing_event::v3::Request>,
|
||||
) -> Result<create_typing_event::v3::Response> {
|
||||
use create_typing_event::v3::Typing;
|
||||
let sender_user = body.sender_user();
|
||||
services
|
||||
.users
|
||||
.update_device_last_seen(sender_user, body.sender_device.as_deref(), ip)
|
||||
.await;
|
||||
|
||||
if sender_user != body.user_id && body.appservice_info.is_none() {
|
||||
return Err!(Request(Forbidden("You cannot update typing status of other users.")));
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user